/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
 */

#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>

#include <net/arp.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>

#include "nf_hnat_mtk.h"
#include "hnat.h"

#include "../mtk_eth_soc.h"
#include "../mtk_eth_reset.h"

#define do_ge2ext_fast(dev, skb)					\
	((IS_LAN_GRP(dev) || IS_WAN(dev) || IS_PPD(dev)) &&		\
	 skb_hnat_is_hashed(skb) &&					\
	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
#define do_ext2ge_fast_learn(dev, skb)					\
	(IS_PPD(dev) &&							\
	 (skb_hnat_sport(skb) == NR_PDMA_PORT ||			\
	  skb_hnat_sport(skb) == NR_QDMA_PORT) &&			\
	 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) ||	\
	  get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
#define do_mape_w2l_fast(dev, skb)					\
	(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))

static struct ipv6hdr mape_l2w_v6h;
static struct ipv6hdr mape_w2l_v6h;
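/* Return the wifi_hook_if[] slot registered for @dev, or 0 when the device
 * is not a registered Wi-Fi hook interface (slot 0 is treated as "not found").
 */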
static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
{
	int i;

	for (i = 1; i < MAX_IF_NUM; i++) {
		if (hnat_priv->wifi_hook_if[i] == dev)
			return i;
	}

	return 0;
}

static inline int get_ext_device_number(void)
{
	int i, number = 0;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++)
		number += 1;
	return number;
}

static inline int find_extif_from_devname(const char *name)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(name, ext_entry->name))
			return 1;
	}
	return 0;
}

static inline int get_index_from_dev(const struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (dev == ext_entry->dev)
			return ext_entry->dev->ifindex;
	}
	return 0;
}

static inline struct net_device *get_dev_from_index(int index)
{
	int i;
	struct extdev_entry *ext_entry;
	struct net_device *dev = 0;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev && index == ext_entry->dev->ifindex) {
			dev = ext_entry->dev;
			break;
		}
	}
	return dev;
}

static inline struct net_device *get_wandev_from_index(int index)
{
	if (!hnat_priv->g_wandev)
		hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

	if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index)
		return hnat_priv->g_wandev;
	return NULL;
}

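/* Bind/unbind a live net_device to its pre-registered extdev_entry slot.
 * extif_set_dev() takes a reference with dev_hold() and returns the ifindex;
 * extif_put_dev() drops that reference. Both return -1 when no slot matches.
 */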
static inline int extif_set_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
			dev_hold(dev);
			ext_entry->dev = dev;
			pr_info("%s(%s)\n", __func__, dev->name);

			return ext_entry->dev->ifindex;
		}
	}

	return -1;
}

static inline int extif_put_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev == dev) {
			ext_entry->dev = NULL;
			dev_put(dev);
			pr_info("%s(%s)\n", __func__, dev->name);

			return 0;
		}
	}

	return -1;
}

int ext_if_add(struct extdev_entry *ext_entry)
{
	int len = get_ext_device_number();

	if (len < MAX_EXT_DEVS)
		hnat_priv->ext_if[len++] = ext_entry;

	return len;
}

int ext_if_del(struct extdev_entry *ext_entry)
{
	int i, j;

	for (i = 0; i < MAX_EXT_DEVS; i++) {
		if (hnat_priv->ext_if[i] == ext_entry) {
			for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++)
				hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
			hnat_priv->ext_if[j] = NULL;
			break;
		}
	}

	return i;
}

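/* Invalidate every bound FOE entry when an HNAT-relevant interface goes down,
 * then flush the HWNAT cache and re-arm the SMA build-entry timer so flows
 * are re-learned from scratch.
 */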
void foe_clear_all_bind_entries(struct net_device *dev)
{
	int i, hash_index;
	struct foe_entry *entry;

	if (!IS_LAN_GRP(dev) && !IS_WAN(dev) &&
	    !find_extif_from_devname(dev->name) &&
	    !dev->netdev_ops->ndo_flow_offload_check)
		return;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_ONLY_FWD_CPU);

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND) {
				entry->ipv4_hnapt.udib1.state = INVALID;
				entry->ipv4_hnapt.udib1.time_stamp =
					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
			}
		}
	}

	/* clear HWNAT cache */
	hnat_cache_ebl(1);

	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
}

static void gmac_ppe_fwd_enable(struct net_device *dev)
{
	if (IS_LAN(dev) || IS_GMAC1_MODE)
		set_gmac_ppe_fwd(NR_GMAC1_PORT, 1);
	else if (IS_WAN(dev))
		set_gmac_ppe_fwd(NR_GMAC2_PORT, 1);
	else if (IS_LAN2(dev))
		set_gmac_ppe_fwd(NR_GMAC3_PORT, 1);
}

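/* netdevice notifier: keeps PPE forwarding, extif bindings and the cached
 * g_ppdev/g_wandev pointers in sync with interface up/down/(un)register
 * events, and triggers hnat_warm_init() once the FE reset has completed.
 */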
int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
			    void *ptr)
{
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		gmac_ppe_fwd_enable(dev);

		extif_set_dev(dev);

		break;
	case NETDEV_GOING_DOWN:
		if (!get_wifi_hook_if_index_from_dev(dev))
			extif_put_dev(dev);

		foe_clear_all_bind_entries(dev);

		break;
	case NETDEV_UNREGISTER:
		if (hnat_priv->g_ppdev == dev) {
			hnat_priv->g_ppdev = NULL;
			dev_put(dev);
		}
		if (hnat_priv->g_wandev == dev) {
			hnat_priv->g_wandev = NULL;
			dev_put(dev);
		}

		break;
	case NETDEV_REGISTER:
		if (IS_PPD(dev) && !hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
		if (IS_WAN(dev) && !hnat_priv->g_wandev)
			hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

		break;
	case MTK_FE_RESET_NAT_DONE:
		pr_info("[%s] HNAT driver starts to do warm init !\n", __func__);
		hnat_warm_init();
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

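/* When a neighbour's MAC changes, walk every PPE table and invalidate bound
 * IPv4 entries whose new_dip matches the neighbour so they get rebuilt with
 * the updated destination MAC.
 */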
void foe_clear_entry(struct neighbour *neigh)
{
	u32 *daddr = (u32 *)neigh->primary_key;
	unsigned char h_dest[ETH_ALEN];
	struct foe_entry *entry;
	int i, hash_index;
	u32 dip;

	dip = (u32)(*daddr);

	for (i = 0; i < CFG_PPE_NUM; i++) {
		if (!hnat_priv->foe_table_cpu[i])
			continue;

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND &&
			    entry->ipv4_hnapt.new_dip == ntohl(dip)) {
				*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
				*((u16 *)&h_dest[4]) =
					swab16(entry->ipv4_hnapt.dmac_lo);
				if (strncmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
					pr_info("%s: state=%d\n", __func__,
						neigh->nud_state);
					cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
						     SMA, SMA_ONLY_FWD_CPU);

					entry->ipv4_hnapt.udib1.state = INVALID;
					entry->ipv4_hnapt.udib1.time_stamp =
						readl((hnat_priv->fe_base + 0x0010)) & 0xFF;

					/* clear HWNAT cache */
					hnat_cache_ebl(1);

					mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
						  jiffies + 3 * HZ);

					pr_info("Delete old entry: dip =%pI4\n", &dip);
					pr_info("Old mac= %pM\n", h_dest);
					pr_info("New mac= %pM\n", neigh->ha);
				}
			}
		}
	}
}

int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = NULL;
	struct neighbour *neigh = NULL;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		neigh = ptr;
		dev = neigh->dev;
		if (dev)
			foe_clear_entry(neigh);
		break;
	}

	return NOTIFY_DONE;
}

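/* Re-insert a saved IPv6 header in front of the inner IPv4 packet for the
 * MAP-E ping-pong path. Returns 0 on success, -1 when the skb has no room
 * or cannot be written to.
 */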
unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
{
	struct ethhdr *eth = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr *iph = NULL;

	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		return -1;
	}

	/* point to L3 */
	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);

	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	eth->h_proto = htons(ETH_P_IPV6);
	skb->protocol = htons(ETH_P_IPV6);

	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
	ip6h = (struct ipv6hdr *)(skb->data);
	ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */

	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
	return 0;
}

static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
				struct ethhdr *eth)
{
	skb->pkt_type = PACKET_HOST;
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
}

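/* Forward a packet received on an external (e.g. Wi-Fi) interface to the
 * pseudo-port device so the PPE can learn the flow: the source ifindex is
 * stashed in the VLAN tag before the skb is re-queued on g_ppdev.
 */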
unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
			       const char *func)
{
	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
		u16 vlan_id = 0;
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		set_to_ppe(skb);

		vlan_id = skb_vlan_tag_get_id(skb);
		if (vlan_id) {
			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
			if (!skb)
				return -1;
		}

		/*set where we come from*/
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
		trace_printk(
			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
			in->name, hnat_priv->g_ppdev->name);
		skb->dev = hnat_priv->g_ppdev;
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__, func);
		return 0;
	}

	trace_printk("%s: called from %s fail\n", __func__, func);
	return -1;
}

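/* Second learning step for ext -> GE traffic: recover the original ingress
 * device from the VLAN tag set by do_hnat_ext_to_ge(), untag the skb and
 * feed it back to the stack; also handles the MAP-E WAN -> LAN ping-pong.
 */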
unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct net_device *dev;
	struct foe_entry *entry;

	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
		     ntohs(skb->vlan_proto), skb->vlan_tci);

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return -1;

	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);

	if (dev) {
		/*set where we to go*/
		skb->dev = dev;
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;

		if (ntohs(eth->h_proto) == ETH_P_8021Q) {
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				return -1;
		}

		if (IS_BOND_MODE &&
		    (((hnat_priv->data->version == MTK_HNAT_V2 ||
		       hnat_priv->data->version == MTK_HNAT_V3) &&
		      (skb_hnat_entry(skb) != 0x7fff)) ||
		     ((hnat_priv->data->version != MTK_HNAT_V2 &&
		       hnat_priv->data->version != MTK_HNAT_V3) &&
		      (skb_hnat_entry(skb) != 0x3fff))))
			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);

		set_from_extge(skb);
		fix_skb_packet_type(skb, skb->dev, eth);
		netif_rx(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		/* MapE WAN --> LAN/WLAN PingPong. */
		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
		if (mape_toggle && dev) {
			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = dev;
				set_from_mape(skb);
				skb->vlan_proto = 0;
				skb->vlan_tci = 0;
				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
				entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
				entry->bfib1.pkt_type = IPV4_HNAPT;
				netif_rx(skb);
				return 0;
			}
		}
		trace_printk("%s: called from %s fail\n", __func__, func);
		return -1;
	}
}

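/* Deliver a PPE-accelerated packet from a GE port to the external device
 * recorded in the FOE entry (act_dp). If the external device has vanished,
 * the bound entry is invalidated and the HWNAT cache flushed.
 */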
unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
{
	/*set where we to go*/
	u8 index;
	struct foe_entry *entry;
	struct net_device *dev;

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return -1;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	if (IS_IPV4_GRP(entry))
		index = entry->ipv4_hnapt.act_dp;
	else
		index = entry->ipv6_5t_route.act_dp;

	dev = get_dev_from_index(index);
	if (!dev) {
		trace_printk("%s: called from %s. Get wifi interface fail\n",
			     __func__, func);
		return 0;
	}

	skb->dev = dev;

	if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NF_ACCEPT;

		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
			return NF_ACCEPT;

		skb_pull_rcsum(skb, VLAN_HLEN);

		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
			2 * ETH_ALEN);
	}

	if (skb->dev) {
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		if (mape_toggle) {
			/* Add ipv6 header mape for lan/wlan -->wan */
			dev = get_wandev_from_index(index);
			if (dev) {
				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
					skb_set_network_header(skb, 0);
					skb_push(skb, ETH_HLEN);
					skb_set_mac_header(skb, 0);
					skb->dev = dev;
					dev_queue_xmit(skb);
					return 0;
				}
				trace_printk("%s: called from %s fail[MapE]\n", __func__,
					     func);
				return -1;
			}
		}
	}
	/*if external devices is down, invalidate related ppe entry*/
	if (entry_hnat_is_bound(entry)) {
		entry->bfib1.state = INVALID;
		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.act_dp = 0;
		else
			entry->ipv6_5t_route.act_dp = 0;

		/* clear HWNAT cache */
		hnat_cache_ebl(1);
	}
	trace_printk("%s: called from %s fail, index=%x\n", __func__,
		     func, index);
	return -1;
}

static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
			      const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
			       const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

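/* Tag the skb headroom with a FOE_MAGIC_* ingress type derived from the
 * input interface so later hooks know which path (LAN, LAN2, WAN, PPD,
 * extif, virtual) the packet entered on.
 */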
static inline void hnat_set_iif(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	if (IS_WHNAT(state->in) && FROM_WED(skb)) {
		return;
	} else if (IS_LAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
	} else if (IS_LAN2(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN2;
	} else if (IS_PPD(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
	} else if (IS_EXT(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
	} else if (IS_WAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
	} else if (!IS_BR(state->in)) {
		if (state->in->netdev_ops->ndo_flow_offload_check) {
			skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
		} else {
			skb_hnat_iface(skb) = FOE_INVALID;

			if (is_magic_tag_valid(skb) &&
			    IS_SPACE_AVAILABLE_HEAD(skb))
				memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
		}
	}
}

static inline void hnat_set_alg(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	skb_hnat_alg(skb) = val;
}

static inline void hnat_set_head_frags(const struct nf_hook_state *state,
				       struct sk_buff *head_skb, int val,
				       void (*fn)(const struct nf_hook_state *state,
						  struct sk_buff *skb, int val))
{
	struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;

	fn(state, head_skb, val);
	while (segs) {
		fn(state, segs, val);
		segs = segs->next;
	}
}

static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
{
	entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
	entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
	entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
}

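/* Fast path for MAP-E WAN -> LAN/WLAN: strip the outer IPv6 header from an
 * IPIP-encapsulated TCP/UDP packet, remember it in mape_w2l_v6h for the
 * ping-pong, and re-queue the inner IPv4 frame on the pseudo-port device.
 */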
unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
				   const char *func)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct iphdr _iphdr;
	struct iphdr *iph;
	struct ethhdr *eth;

	/* WAN -> LAN/WLAN MapE. */
	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
		if (unlikely(!iph))
			return -1;

		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			break;
		default:
			return -1;
		}
		mape_w2l_v6h = *ip6h;

		/* Remove ipv6 header. */
		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
		       skb->data - ETH_HLEN, ETH_HLEN);
		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
		skb_set_mac_header(skb, 0);
		skb_set_network_header(skb, ETH_HLEN);
		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));

		eth = eth_hdr(skb);
		eth->h_proto = htons(ETH_P_IP);
		set_to_ppe(skb);

		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));

		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		skb->dev = hnat_priv->g_ppdev;
		skb->protocol = htons(ETH_P_IP);

		dev_queue_xmit(skb);

		return 0;
	}
	return -1;
}

void mtk_464xlat_pre_process(struct sk_buff *skb)
{
	struct foe_entry *foe;

	foe = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (foe->bfib1.state != BIND &&
	    skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH)
		memcpy(&headroom[skb_hnat_entry(skb)], skb->head,
		       sizeof(struct hnat_desc));

	if (foe->bfib1.state == BIND)
		memset(&headroom[skb_hnat_entry(skb)], 0,
		       sizeof(struct hnat_desc));
}

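/* Return 1 if the packet is a candidate for PPE acceleration: non-broadcast
 * IPv4/IPv6 TCP or UDP (including IPv6-in-IPIP for DS-Lite/MAP-E) or a
 * VLAN-tagged frame; everything else stays on the software path.
 */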
static unsigned int is_ppe_support_type(struct sk_buff *skb)
{
	struct ethhdr *eth = NULL;
	struct iphdr *iph = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr _iphdr;

	eth = eth_hdr(skb);
	if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) ||
	    is_broadcast_ether_addr(eth->h_dest))
		return 0;

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);

		/* do not accelerate non tcp/udp traffic */
		if ((iph->protocol == IPPROTO_TCP) ||
		    (iph->protocol == IPPROTO_UDP) ||
		    (iph->protocol == IPPROTO_IPV6)) {
			return 1;
		}

		break;
	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);

		if ((ip6h->nexthdr == NEXTHDR_TCP) ||
		    (ip6h->nexthdr == NEXTHDR_UDP)) {
			return 1;
		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
			iph = skb_header_pointer(skb, IPV6_HDR_LEN,
						 sizeof(_iphdr), &_iphdr);
			if (unlikely(!iph))
				return 0;

			if ((iph->protocol == IPPROTO_TCP) ||
			    (iph->protocol == IPPROTO_UDP)) {
				return 1;
			}

		}

		break;
	case ETH_P_8021Q:
		return 1;
	}

	return 0;
}

static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!skb)
		goto drop;

	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE need remove ipv6 header and pingpong. */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	if (is_from_mape(skb))
		clr_from_extge(skb);
#endif
	if (xlat_toggle)
		mtk_464xlat_pre_process(skb);

	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!skb)
		goto drop;

	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}
	if (xlat_toggle)
		mtk_464xlat_pre_process(skb);

	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct vlan_ethhdr *veth;

	if (!skb)
		goto drop;

	if (IS_HQOS_MODE && hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx ,step 2, learning stage */
		if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
		    (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets form ge -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE need remove ipv6 header and pingpong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}
#endif
	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/*copy ether type for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}

static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	u32 nexthop;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = (__force struct net_device *)out;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
			   &ip_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	return 0;
}

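/* Sum of the bitwise-inverted checksum, total-length and ID fields of the
 * original IPv4 header, with carries folded back into 16 bits; stored as
 * hdr_chksum for the 6RD tunnel header built by the PPE.
 */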
static u16 ppe_get_chkbase(struct iphdr *iph)
{
	u16 org_chksum = ntohs(iph->check);
	u16 org_tot_len = ntohs(iph->tot_len);
	u16 org_id = ntohs(iph->id);
	u16 chksum_tmp, tot_len_tmp, id_tmp;
	u32 tmp = 0;
	u16 chksum_base = 0;

	chksum_tmp = ~(org_chksum);
	tot_len_tmp = ~(org_tot_len);
	id_tmp = ~(org_id);
	tmp = chksum_tmp + tot_len_tmp + id_tmp;
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	chksum_base = tmp & 0xFFFF;

	return chksum_base;
}

struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
				  struct flow_offload_hw_path *hw_path)
{
	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv6_5t_route.smac_lo =
			swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
		break;
	}
	return entry;
}

struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
				   struct flow_offload_hw_path *hw_path)
{
	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
	entry.bfib1.cah = 1;
	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V2 ||
				  hnat_priv->data->version == MTK_HNAT_V3) ?
		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);

	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv4_hnapt.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V1_3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv4_hnapt.iblk2.mcast = 0;
		}

		entry.ipv4_hnapt.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V2 ||
			 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv6_5t_route.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V1_3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv6_5t_route.iblk2.mcast = 0;
		}

		entry.ipv6_5t_route.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V2 ||
			 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
		break;
	}
	return entry;
}

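/* Build a candidate FOE entry from the skb and its flow_offload_hw_path:
 * copy the 5-tuple/tunnel fields recorded in the unbound PPE entry, fill in
 * the translated addresses and ports for the supported packet types, then
 * select the egress GMAC/queue for the outgoing device.
 */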
developerfd40db22021-04-29 10:08:25 +08001148static unsigned int skb_to_hnat_info(struct sk_buff *skb,
1149 const struct net_device *dev,
1150 struct foe_entry *foe,
1151 struct flow_offload_hw_path *hw_path)
1152{
1153 struct foe_entry entry = { 0 };
1154 int whnat = IS_WHNAT(dev);
1155 struct ethhdr *eth;
1156 struct iphdr *iph;
1157 struct ipv6hdr *ip6h;
1158 struct tcpudphdr _ports;
1159 const struct tcpudphdr *pptr;
developer5ffc5f12022-10-25 18:51:46 +08001160 struct nf_conn *ct;
1161 enum ip_conntrack_info ctinfo;
developerfd40db22021-04-29 10:08:25 +08001162 u32 gmac = NR_DISCARD;
1163 int udp = 0;
1164 u32 qid = 0;
developeraf07fad2021-11-19 17:53:42 +08001165 u32 port_id = 0;
developerfd40db22021-04-29 10:08:25 +08001166 int mape = 0;
1167
developer5ffc5f12022-10-25 18:51:46 +08001168 ct = nf_ct_get(skb, &ctinfo);
1169
developerfd40db22021-04-29 10:08:25 +08001170 if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
1171 /* point to ethernet header for DS-Lite and MapE */
1172 eth = (struct ethhdr *)(skb->data - ETH_HLEN);
1173 else
1174 eth = eth_hdr(skb);
developer8116b0a2021-08-23 18:07:20 +08001175
1176 /*do not bind multicast if PPE mcast not enable*/
1177 if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest))
1178 return 0;
developerfd40db22021-04-29 10:08:25 +08001179
1180 entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
developerf94d8862022-03-29 10:11:17 +08001181 entry.bfib1.state = foe->udib1.state;
1182
developerd35bbcc2022-09-28 22:46:01 +08001183#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08001184 entry.bfib1.sp = foe->udib1.sp;
1185#endif
1186
1187 switch (ntohs(eth->h_proto)) {
1188 case ETH_P_IP:
1189 iph = ip_hdr(skb);
1190 switch (iph->protocol) {
1191 case IPPROTO_UDP:
1192 udp = 1;
1193 /* fallthrough */
1194 case IPPROTO_TCP:
1195 entry.ipv4_hnapt.etype = htons(ETH_P_IP);
1196
1197 /* DS-Lite WAN->LAN */
1198 if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
1199 entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
1200 entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
1201 entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
1202 entry.ipv4_dslite.sport =
1203 foe->ipv4_dslite.sport;
1204 entry.ipv4_dslite.dport =
1205 foe->ipv4_dslite.dport;
1206
developerd35bbcc2022-09-28 22:46:01 +08001207#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08001208 if (entry.bfib1.pkt_type == IPV4_MAP_E) {
1209 pptr = skb_header_pointer(skb,
1210 iph->ihl * 4,
1211 sizeof(_ports),
1212 &_ports);
developer4c32b7a2021-11-13 16:46:43 +08001213 if (unlikely(!pptr))
1214 return -1;
developerfd40db22021-04-29 10:08:25 +08001215
developerd35bbcc2022-09-28 22:46:01 +08001216 entry.ipv4_mape.new_sip =
developerfd40db22021-04-29 10:08:25 +08001217 ntohl(iph->saddr);
developerd35bbcc2022-09-28 22:46:01 +08001218 entry.ipv4_mape.new_dip =
developerfd40db22021-04-29 10:08:25 +08001219 ntohl(iph->daddr);
developerd35bbcc2022-09-28 22:46:01 +08001220 entry.ipv4_mape.new_sport =
developerfd40db22021-04-29 10:08:25 +08001221 ntohs(pptr->src);
developerd35bbcc2022-09-28 22:46:01 +08001222 entry.ipv4_mape.new_dport =
developerfd40db22021-04-29 10:08:25 +08001223 ntohs(pptr->dst);
1224 }
1225#endif
1226
1227 entry.ipv4_dslite.tunnel_sipv6_0 =
1228 foe->ipv4_dslite.tunnel_sipv6_0;
1229 entry.ipv4_dslite.tunnel_sipv6_1 =
1230 foe->ipv4_dslite.tunnel_sipv6_1;
1231 entry.ipv4_dslite.tunnel_sipv6_2 =
1232 foe->ipv4_dslite.tunnel_sipv6_2;
1233 entry.ipv4_dslite.tunnel_sipv6_3 =
1234 foe->ipv4_dslite.tunnel_sipv6_3;
1235
1236 entry.ipv4_dslite.tunnel_dipv6_0 =
1237 foe->ipv4_dslite.tunnel_dipv6_0;
1238 entry.ipv4_dslite.tunnel_dipv6_1 =
1239 foe->ipv4_dslite.tunnel_dipv6_1;
1240 entry.ipv4_dslite.tunnel_dipv6_2 =
1241 foe->ipv4_dslite.tunnel_dipv6_2;
1242 entry.ipv4_dslite.tunnel_dipv6_3 =
1243 foe->ipv4_dslite.tunnel_dipv6_3;
1244
1245 entry.ipv4_dslite.bfib1.rmt = 1;
1246 entry.ipv4_dslite.iblk2.dscp = iph->tos;
1247 entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
1248 if (hnat_priv->data->per_flow_accounting)
1249 entry.ipv4_dslite.iblk2.mibf = 1;
1250
1251 } else {
1252 entry.ipv4_hnapt.iblk2.dscp = iph->tos;
1253 if (hnat_priv->data->per_flow_accounting)
1254 entry.ipv4_hnapt.iblk2.mibf = 1;
1255
1256 entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;
1257
developerdfc8ef52022-12-06 14:00:09 +08001258 if (skb_vlan_tag_present(skb)) {
developerfd40db22021-04-29 10:08:25 +08001259 entry.bfib1.vlan_layer += 1;
1260
1261 if (entry.ipv4_hnapt.vlan1)
developerdfc8ef52022-12-06 14:00:09 +08001262 entry.ipv4_hnapt.vlan2 =
1263 skb->vlan_tci;
developerfd40db22021-04-29 10:08:25 +08001264 else
developerdfc8ef52022-12-06 14:00:09 +08001265 entry.ipv4_hnapt.vlan1 =
1266 skb->vlan_tci;
1267 }
developerfd40db22021-04-29 10:08:25 +08001268
1269 entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
1270 entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
1271 entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
1272 entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
1273
1274 entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
1275 entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
1276 }
1277
1278 entry.ipv4_hnapt.bfib1.udp = udp;
1279 if (IS_IPV4_HNAPT(foe)) {
1280 pptr = skb_header_pointer(skb, iph->ihl * 4,
1281 sizeof(_ports),
1282 &_ports);
developer4c32b7a2021-11-13 16:46:43 +08001283 if (unlikely(!pptr))
1284 return -1;
1285
developerfd40db22021-04-29 10:08:25 +08001286 entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
1287 entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
1288 }
1289
1290 break;
1291
1292 default:
1293 return -1;
1294 }
1295 trace_printk(
1296 "[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
1297 __func__, skb->head, skb->data, iph, skb->len,
1298 skb->data_len);
1299 break;
1300
1301 case ETH_P_IPV6:
1302 ip6h = ipv6_hdr(skb);
1303 switch (ip6h->nexthdr) {
1304 case NEXTHDR_UDP:
1305 udp = 1;
1306 /* fallthrough */
1307 case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
1308 entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);
1309
1310 entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;
1311
developerdfc8ef52022-12-06 14:00:09 +08001312 if (skb_vlan_tag_present(skb)) {
developerfd40db22021-04-29 10:08:25 +08001313 entry.bfib1.vlan_layer += 1;
1314
1315 if (entry.ipv6_5t_route.vlan1)
developerdfc8ef52022-12-06 14:00:09 +08001316 entry.ipv6_5t_route.vlan2 =
1317 skb->vlan_tci;
developerfd40db22021-04-29 10:08:25 +08001318 else
developerdfc8ef52022-12-06 14:00:09 +08001319 entry.ipv6_5t_route.vlan1 =
1320 skb->vlan_tci;
developerfd40db22021-04-29 10:08:25 +08001321 }
1322
1323 if (hnat_priv->data->per_flow_accounting)
1324 entry.ipv6_5t_route.iblk2.mibf = 1;
1325 entry.ipv6_5t_route.bfib1.udp = udp;
1326
1327 if (IS_IPV6_6RD(foe)) {
1328 entry.ipv6_5t_route.bfib1.rmt = 1;
1329 entry.ipv6_6rd.tunnel_sipv4 =
1330 foe->ipv6_6rd.tunnel_sipv4;
1331 entry.ipv6_6rd.tunnel_dipv4 =
1332 foe->ipv6_6rd.tunnel_dipv4;
1333 }
1334
1335 entry.ipv6_3t_route.ipv6_sip0 =
1336 foe->ipv6_3t_route.ipv6_sip0;
1337 entry.ipv6_3t_route.ipv6_sip1 =
1338 foe->ipv6_3t_route.ipv6_sip1;
1339 entry.ipv6_3t_route.ipv6_sip2 =
1340 foe->ipv6_3t_route.ipv6_sip2;
1341 entry.ipv6_3t_route.ipv6_sip3 =
1342 foe->ipv6_3t_route.ipv6_sip3;
1343
1344 entry.ipv6_3t_route.ipv6_dip0 =
1345 foe->ipv6_3t_route.ipv6_dip0;
1346 entry.ipv6_3t_route.ipv6_dip1 =
1347 foe->ipv6_3t_route.ipv6_dip1;
1348 entry.ipv6_3t_route.ipv6_dip2 =
1349 foe->ipv6_3t_route.ipv6_dip2;
1350 entry.ipv6_3t_route.ipv6_dip3 =
1351 foe->ipv6_3t_route.ipv6_dip3;
1352
developer729f0272021-06-09 17:28:38 +08001353 if (IS_IPV6_3T_ROUTE(foe)) {
1354 entry.ipv6_3t_route.prot =
1355 foe->ipv6_3t_route.prot;
1356 entry.ipv6_3t_route.hph =
1357 foe->ipv6_3t_route.hph;
1358 }
1359
developerfd40db22021-04-29 10:08:25 +08001360 if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
1361 entry.ipv6_5t_route.sport =
1362 foe->ipv6_5t_route.sport;
1363 entry.ipv6_5t_route.dport =
1364 foe->ipv6_5t_route.dport;
1365 }
developer5ffc5f12022-10-25 18:51:46 +08001366
1367#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1368 if (ct && (ct->status & IPS_SRC_NAT)) {
1369 entry.bfib1.pkt_type = IPV6_HNAPT;
1370
1371 if (IS_WAN(dev) || IS_DSA_WAN(dev)) {
1372 entry.ipv6_hnapt.eg_ipv6_dir =
1373 IPV6_SNAT;
1374 entry.ipv6_hnapt.new_ipv6_ip0 =
1375 ntohl(ip6h->saddr.s6_addr32[0]);
1376 entry.ipv6_hnapt.new_ipv6_ip1 =
1377 ntohl(ip6h->saddr.s6_addr32[1]);
1378 entry.ipv6_hnapt.new_ipv6_ip2 =
1379 ntohl(ip6h->saddr.s6_addr32[2]);
1380 entry.ipv6_hnapt.new_ipv6_ip3 =
1381 ntohl(ip6h->saddr.s6_addr32[3]);
1382 } else {
1383 entry.ipv6_hnapt.eg_ipv6_dir =
1384 IPV6_DNAT;
1385 entry.ipv6_hnapt.new_ipv6_ip0 =
1386 ntohl(ip6h->daddr.s6_addr32[0]);
1387 entry.ipv6_hnapt.new_ipv6_ip1 =
1388 ntohl(ip6h->daddr.s6_addr32[1]);
1389 entry.ipv6_hnapt.new_ipv6_ip2 =
1390 ntohl(ip6h->daddr.s6_addr32[2]);
1391 entry.ipv6_hnapt.new_ipv6_ip3 =
1392 ntohl(ip6h->daddr.s6_addr32[3]);
1393 }
1394
1395 pptr = skb_header_pointer(skb, IPV6_HDR_LEN,
1396 sizeof(_ports),
1397 &_ports);
1398 if (unlikely(!pptr))
1399 return -1;
1400
1401 entry.ipv6_hnapt.new_sport = ntohs(pptr->src);
1402 entry.ipv6_hnapt.new_dport = ntohs(pptr->dst);
1403 }
1404#endif
1405
developerfd40db22021-04-29 10:08:25 +08001406 entry.ipv6_5t_route.iblk2.dscp =
1407 (ip6h->priority << 4 |
1408 (ip6h->flow_lbl[0] >> 4));
1409 break;
1410
1411 case NEXTHDR_IPIP:
1412 if ((!mape_toggle &&
1413 entry.bfib1.pkt_type == IPV4_DSLITE) ||
1414 (mape_toggle &&
1415 entry.bfib1.pkt_type == IPV4_MAP_E)) {
1416 /* DS-Lite LAN->WAN */
1417 entry.ipv4_dslite.bfib1.udp =
1418 foe->ipv4_dslite.bfib1.udp;
1419 entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
1420 entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
1421 entry.ipv4_dslite.sport =
1422 foe->ipv4_dslite.sport;
1423 entry.ipv4_dslite.dport =
1424 foe->ipv4_dslite.dport;
1425
1426 entry.ipv4_dslite.tunnel_sipv6_0 =
1427 ntohl(ip6h->saddr.s6_addr32[0]);
1428 entry.ipv4_dslite.tunnel_sipv6_1 =
1429 ntohl(ip6h->saddr.s6_addr32[1]);
1430 entry.ipv4_dslite.tunnel_sipv6_2 =
1431 ntohl(ip6h->saddr.s6_addr32[2]);
1432 entry.ipv4_dslite.tunnel_sipv6_3 =
1433 ntohl(ip6h->saddr.s6_addr32[3]);
1434
1435 entry.ipv4_dslite.tunnel_dipv6_0 =
1436 ntohl(ip6h->daddr.s6_addr32[0]);
1437 entry.ipv4_dslite.tunnel_dipv6_1 =
1438 ntohl(ip6h->daddr.s6_addr32[1]);
1439 entry.ipv4_dslite.tunnel_dipv6_2 =
1440 ntohl(ip6h->daddr.s6_addr32[2]);
1441 entry.ipv4_dslite.tunnel_dipv6_3 =
1442 ntohl(ip6h->daddr.s6_addr32[3]);
1443
1444 ppe_fill_flow_lbl(&entry, ip6h);
1445
1446 entry.ipv4_dslite.priority = ip6h->priority;
1447 entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
1448 entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
1449 if (hnat_priv->data->per_flow_accounting)
1450 entry.ipv4_dslite.iblk2.mibf = 1;
developer25fc8c02022-05-06 16:24:02 +08001451 /* Map-E LAN->WAN record inner IPv4 header info. */
developer8c707df2022-10-24 14:09:00 +08001452#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developer25fc8c02022-05-06 16:24:02 +08001453 if (mape_toggle) {
1454 entry.ipv4_dslite.iblk2.dscp = foe->ipv4_dslite.iblk2.dscp;
developerd35bbcc2022-09-28 22:46:01 +08001455 entry.ipv4_mape.new_sip = foe->ipv4_mape.new_sip;
1456 entry.ipv4_mape.new_dip = foe->ipv4_mape.new_dip;
1457 entry.ipv4_mape.new_sport = foe->ipv4_mape.new_sport;
1458 entry.ipv4_mape.new_dport = foe->ipv4_mape.new_dport;
developer25fc8c02022-05-06 16:24:02 +08001459 }
1460#endif
developerfd40db22021-04-29 10:08:25 +08001461 } else if (mape_toggle &&
1462 entry.bfib1.pkt_type == IPV4_HNAPT) {
1463 /* MapE LAN -> WAN */
1464 mape = 1;
1465 entry.ipv4_hnapt.iblk2.dscp =
1466 foe->ipv4_hnapt.iblk2.dscp;
1467 if (hnat_priv->data->per_flow_accounting)
1468 entry.ipv4_hnapt.iblk2.mibf = 1;
1469
developerbb816412021-06-11 15:43:44 +08001470 if (IS_GMAC1_MODE)
1471 entry.ipv4_hnapt.vlan1 = 1;
1472 else
1473 entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;
developerfd40db22021-04-29 10:08:25 +08001474
1475 entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
1476 entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
1477 entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
1478 entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
1479
1480 entry.ipv4_hnapt.new_sip =
1481 foe->ipv4_hnapt.new_sip;
1482 entry.ipv4_hnapt.new_dip =
1483 foe->ipv4_hnapt.new_dip;
1484 entry.ipv4_hnapt.etype = htons(ETH_P_IP);
1485
developer34028fb2022-01-11 13:51:29 +08001486 if (IS_HQOS_MODE) {
developeraf07fad2021-11-19 17:53:42 +08001487 entry.ipv4_hnapt.iblk2.qid =
developer4164cfe2022-12-01 11:27:41 +08001488 (hnat_priv->data->version ==
1489 MTK_HNAT_V2 ||
1490 hnat_priv->data->version ==
1491 MTK_HNAT_V3) ?
developeraf07fad2021-11-19 17:53:42 +08001492 skb->mark & 0x7f : skb->mark & 0xf;
developerd35bbcc2022-09-28 22:46:01 +08001493#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer934756a2022-11-18 14:51:34 +08001494 if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
developer493adc32022-11-29 22:34:18 +08001495 (IS_HQOS_DL_MODE &&
1496 IS_LAN_GRP(dev)) ||
developer934756a2022-11-18 14:51:34 +08001497 (IS_PPPQ_MODE &&
1498 IS_PPPQ_PATH(dev, skb)))
developer47545a32022-11-15 16:06:58 +08001499 entry.ipv4_hnapt.tport_id = 1;
1500 else
1501 entry.ipv4_hnapt.tport_id = 0;
developerd35bbcc2022-09-28 22:46:01 +08001502#else
developeraf07fad2021-11-19 17:53:42 +08001503 entry.ipv4_hnapt.iblk2.fqos = 1;
developerd35bbcc2022-09-28 22:46:01 +08001504#endif
developeraf07fad2021-11-19 17:53:42 +08001505 }
developerfd40db22021-04-29 10:08:25 +08001506
1507 entry.ipv4_hnapt.bfib1.udp =
1508 foe->ipv4_hnapt.bfib1.udp;
1509
1510 entry.ipv4_hnapt.new_sport =
1511 foe->ipv4_hnapt.new_sport;
1512 entry.ipv4_hnapt.new_dport =
1513 foe->ipv4_hnapt.new_dport;
1514 mape_l2w_v6h = *ip6h;
1515 }
1516 break;
1517
1518 default:
1519 return -1;
1520 }
1521
1522 trace_printk(
1523 "[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
1524 __func__, skb->head, skb->data, ip6h, skb->len,
1525 skb->data_len);
1526 break;
1527
1528 default:
developerfd40db22021-04-29 10:08:25 +08001529 iph = ip_hdr(skb);
1530 switch (entry.bfib1.pkt_type) {
1531 case IPV6_6RD: /* 6RD LAN->WAN */
1532 entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
1533 entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
1534 entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
1535 entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
1536
1537 entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
1538 entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
1539 entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
1540 entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
1541
1542 entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
1543 entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
1544 entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
1545 entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
1546 entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
1547 entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
1548 entry.ipv6_6rd.ttl = iph->ttl;
1549 entry.ipv6_6rd.dscp = iph->tos;
1550 entry.ipv6_6rd.per_flow_6rd_id = 1;
1551 entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
1552 if (hnat_priv->data->per_flow_accounting)
1553 entry.ipv6_6rd.iblk2.mibf = 1;
1554 break;
1555
1556 default:
1557 return -1;
1558 }
1559 }
1560
1561 /* Fill Layer2 Info.*/
1562 entry = ppe_fill_L2_info(eth, entry, hw_path);
1563
1564 /* Fill Info Blk*/
1565 entry = ppe_fill_info_blk(eth, entry, hw_path);
1566
1567 if (IS_LAN(dev)) {
1568 if (IS_DSA_LAN(dev))
developeraf07fad2021-11-19 17:53:42 +08001569 port_id = hnat_dsa_fill_stag(dev, &entry, hw_path,
1570 ntohs(eth->h_proto),
1571 mape);
developerfd40db22021-04-29 10:08:25 +08001572
1573 if (IS_BOND_MODE)
1574 gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
1575 NR_GMAC2_PORT : NR_GMAC1_PORT;
1576 else
1577 gmac = NR_GMAC1_PORT;
developerd35bbcc2022-09-28 22:46:01 +08001578 } else if (IS_LAN2(dev)) {
1579 gmac = NR_GMAC3_PORT;
developerfd40db22021-04-29 10:08:25 +08001580 } else if (IS_WAN(dev)) {
1581 if (IS_DSA_WAN(dev))
developeraf07fad2021-11-19 17:53:42 +08001582 port_id = hnat_dsa_fill_stag(dev,&entry, hw_path,
1583 ntohs(eth->h_proto),
1584 mape);
developerfd40db22021-04-29 10:08:25 +08001585 if (mape_toggle && mape == 1) {
1586 gmac = NR_PDMA_PORT;
1587 /* Set act_dp = wan_dev */
1588 entry.ipv4_hnapt.act_dp = dev->ifindex;
1589 } else {
1590 gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
1591 }
developerd35bbcc2022-09-28 22:46:01 +08001592 } else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN_GRP(skb) ||
developer99506e52021-06-30 22:03:02 +08001593 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
developerfd40db22021-04-29 10:08:25 +08001594 if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
1595 entry.bfib1.vpm = 1;
1596 entry.bfib1.vlan_layer = 1;
1597
1598 if (FROM_GE_LAN(skb))
1599 entry.ipv4_hnapt.vlan1 = 1;
1600 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1601 entry.ipv4_hnapt.vlan1 = 2;
1602 }
1603
1604 trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
1605 skb_hnat_iface(skb), dev->name);
1606 /* To CPU then stolen by pre-routing hant hook of LAN/WAN
1607 * Current setting is PDMA RX.
1608 */
1609 gmac = NR_PDMA_PORT;
1610 if (IS_IPV4_GRP(foe))
1611 entry.ipv4_hnapt.act_dp = dev->ifindex;
1612 else
1613 entry.ipv6_5t_route.act_dp = dev->ifindex;
1614 } else {
1615 printk_ratelimited(KERN_WARNING
1616 "Unknown case of dp, iif=%x --> %s\n",
1617 skb_hnat_iface(skb), dev->name);
1618
1619 return 0;
1620 }
1621
developerafff5662022-06-29 10:09:56 +08001622 if (IS_HQOS_MODE || skb->mark >= MAX_PPPQ_PORT_NUM)
developeraf07fad2021-11-19 17:53:42 +08001623 qid = skb->mark & (MTK_QDMA_TX_MASK);
developer934756a2022-11-18 14:51:34 +08001624 else if (IS_PPPQ_MODE && IS_PPPQ_PATH(dev, skb))
developeraf07fad2021-11-19 17:53:42 +08001625 qid = port_id & MTK_QDMA_TX_MASK;
1626 else
1627 qid = 0;
developerfd40db22021-04-29 10:08:25 +08001628
1629 if (IS_IPV4_GRP(foe)) {
1630 entry.ipv4_hnapt.iblk2.dp = gmac;
1631 entry.ipv4_hnapt.iblk2.port_mg =
developer4164cfe2022-12-01 11:27:41 +08001632 (hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;
developer24948202021-11-24 17:38:27 +08001633
developeraf07fad2021-11-19 17:53:42 +08001634 if (qos_toggle) {
developer4164cfe2022-12-01 11:27:41 +08001635 if (hnat_priv->data->version == MTK_HNAT_V2 ||
1636 hnat_priv->data->version == MTK_HNAT_V3) {
developeraf07fad2021-11-19 17:53:42 +08001637 entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
1638 } else {
1639 /* qid[5:0]= port_mg[1:0]+ qid[3:0] */
1640 entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
developer4164cfe2022-12-01 11:27:41 +08001641 if (hnat_priv->data->version != MTK_HNAT_V1_1)
developeraf07fad2021-11-19 17:53:42 +08001642 entry.ipv4_hnapt.iblk2.port_mg |=
1643 ((qid >> 4) & 0x3);
developerfd40db22021-04-29 10:08:25 +08001644
developerd35bbcc2022-09-28 22:46:01 +08001645 if (((IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
developeraf07fad2021-11-19 17:53:42 +08001646 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
1647 ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
1648 (!whnat)) {
1649 entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
1650 entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
1651 entry.bfib1.vlan_layer = 1;
1652 }
developerfd40db22021-04-29 10:08:25 +08001653 }
developerfd40db22021-04-29 10:08:25 +08001654
developer34028fb2022-01-11 13:51:29 +08001655 if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT ||
1656 (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
developeraf07fad2021-11-19 17:53:42 +08001657 entry.ipv4_hnapt.iblk2.fqos = 0;
1658 else
developerd35bbcc2022-09-28 22:46:01 +08001659#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer934756a2022-11-18 14:51:34 +08001660 if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
developer493adc32022-11-29 22:34:18 +08001661 (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
developer934756a2022-11-18 14:51:34 +08001662 (IS_PPPQ_MODE &&
1663 IS_PPPQ_PATH(dev, skb)))
developer47545a32022-11-15 16:06:58 +08001664 entry.ipv4_hnapt.tport_id = 1;
1665 else
1666 entry.ipv4_hnapt.tport_id = 0;
developerd35bbcc2022-09-28 22:46:01 +08001667#else
developer399ec072022-06-24 16:07:41 +08001668 entry.ipv4_hnapt.iblk2.fqos =
developer934756a2022-11-18 14:51:34 +08001669 (!IS_PPPQ_MODE ||
1670 (IS_PPPQ_MODE &&
1671 IS_PPPQ_PATH(dev, skb)));
developerd35bbcc2022-09-28 22:46:01 +08001672#endif
developeraf07fad2021-11-19 17:53:42 +08001673 } else {
developerfd40db22021-04-29 10:08:25 +08001674 entry.ipv4_hnapt.iblk2.fqos = 0;
developeraf07fad2021-11-19 17:53:42 +08001675 }
developerfd40db22021-04-29 10:08:25 +08001676 } else {
1677 entry.ipv6_5t_route.iblk2.dp = gmac;
1678 entry.ipv6_5t_route.iblk2.port_mg =
developer4164cfe2022-12-01 11:27:41 +08001679 (hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;
developer24948202021-11-24 17:38:27 +08001680
developeraf07fad2021-11-19 17:53:42 +08001681 if (qos_toggle) {
developer4164cfe2022-12-01 11:27:41 +08001682 if (hnat_priv->data->version == MTK_HNAT_V2 ||
1683 hnat_priv->data->version == MTK_HNAT_V3) {
developeraf07fad2021-11-19 17:53:42 +08001684 entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
1685 } else {
1686 /* qid[5:0]= port_mg[1:0]+ qid[3:0] */
1687 entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
developer4164cfe2022-12-01 11:27:41 +08001688 if (hnat_priv->data->version != MTK_HNAT_V1_1)
developeraf07fad2021-11-19 17:53:42 +08001689 entry.ipv6_5t_route.iblk2.port_mg |=
1690 ((qid >> 4) & 0x3);
developerfd40db22021-04-29 10:08:25 +08001691
developerd35bbcc2022-09-28 22:46:01 +08001692 if (IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
developeraf07fad2021-11-19 17:53:42 +08001693 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
1694 (!whnat)) {
1695 entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
1696 entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
1697 entry.bfib1.vlan_layer = 1;
1698 }
developerfd40db22021-04-29 10:08:25 +08001699 }
developerfd40db22021-04-29 10:08:25 +08001700
developer34028fb2022-01-11 13:51:29 +08001701 if (FROM_EXT(skb) ||
1702 (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
developeraf07fad2021-11-19 17:53:42 +08001703 entry.ipv6_5t_route.iblk2.fqos = 0;
1704 else
developerd35bbcc2022-09-28 22:46:01 +08001705#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer934756a2022-11-18 14:51:34 +08001706 if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
developer493adc32022-11-29 22:34:18 +08001707 (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
developer934756a2022-11-18 14:51:34 +08001708 (IS_PPPQ_MODE &&
1709 IS_PPPQ_PATH(dev, skb)))
developer47545a32022-11-15 16:06:58 +08001710 entry.ipv6_5t_route.tport_id = 1;
1711 else
1712 entry.ipv6_5t_route.tport_id = 0;
developerd35bbcc2022-09-28 22:46:01 +08001713#else
developer399ec072022-06-24 16:07:41 +08001714 entry.ipv6_5t_route.iblk2.fqos =
developer934756a2022-11-18 14:51:34 +08001715 (!IS_PPPQ_MODE ||
1716 (IS_PPPQ_MODE &&
1717 IS_PPPQ_PATH(dev, skb)));
developerd35bbcc2022-09-28 22:46:01 +08001718#endif
developeraf07fad2021-11-19 17:53:42 +08001719 } else {
developerfd40db22021-04-29 10:08:25 +08001720 entry.ipv6_5t_route.iblk2.fqos = 0;
developeraf07fad2021-11-19 17:53:42 +08001721 }
developerfd40db22021-04-29 10:08:25 +08001722 }
1723
developer60e60962021-06-15 21:05:07 +08001724 /* The INFO2.port_mg and 2nd VLAN ID fields of the PPE entry are redefined
1725 * by the Wi-Fi whnat engine. These fields and INFO2.dp are updated and
1726 * the entry is set to the BIND state in mtk_sw_nat_hook_tx().
1727 */
developer7b36dca2022-05-19 18:29:10 +08001728 if (!whnat) {
1729 entry.bfib1.ttl = 1;
developer60e60962021-06-15 21:05:07 +08001730 entry.bfib1.state = BIND;
developer7b36dca2022-05-19 18:29:10 +08001731 }
developer60e60962021-06-15 21:05:07 +08001732
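	/* Make sure every field of the local copy is visible before it is
	 * copied into the hw FOE table entry.
	 */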
developerbc552cc2022-03-15 16:19:27 +08001733 wmb();
developerfd40db22021-04-29 10:08:25 +08001734 memcpy(foe, &entry, sizeof(entry));
1735 /* reset the statistics for this entry */
developer577ad2f2022-11-28 10:33:36 +08001736 if (hnat_priv->data->per_flow_accounting &&
1737 skb_hnat_entry(skb) < hnat_priv->foe_etry_num &&
1738 skb_hnat_ppe(skb) < CFG_PPE_NUM)
developer471f6562021-05-10 20:48:34 +08001739 memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
1740 0, sizeof(struct mib_entry));
developerfd40db22021-04-29 10:08:25 +08001741
developerfdfe1572021-09-13 16:56:33 +08001742 skb_hnat_filled(skb) = HNAT_INFO_FILLED;
developerfd40db22021-04-29 10:08:25 +08001743
1744 return 0;
1745}
1746
1747int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
1748{
1749 struct foe_entry *entry;
1750 struct ethhdr *eth;
developerbc552cc2022-03-15 16:19:27 +08001751 struct hnat_bind_info_blk bfib1_tx;
developerfd40db22021-04-29 10:08:25 +08001752
developerfdfe1572021-09-13 16:56:33 +08001753 if (skb_hnat_alg(skb) || !is_hnat_info_filled(skb) ||
1754 !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
developerfd40db22021-04-29 10:08:25 +08001755 return NF_ACCEPT;
1756
1757 trace_printk(
1758 "[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
1759 __func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
1760 skb_hnat_wdma_id(skb), skb_hnat_rx_id(skb),
1761 skb_hnat_wc_id(skb), skb_hnat_bss_id(skb));
1762
developer99506e52021-06-30 22:03:02 +08001763 if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) &&
1764 (gmac_no != NR_WHNAT_WDMA_PORT))
1765 return NF_ACCEPT;
1766
developerc0419aa2022-12-07 15:56:36 +08001767 if (unlikely(!skb_mac_header_was_set(skb)))
1768 return NF_ACCEPT;
1769
developerfd40db22021-04-29 10:08:25 +08001770 if (!skb_hnat_is_hashed(skb))
1771 return NF_ACCEPT;
1772
developer955a6f62021-07-26 10:54:39 +08001773 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
1774 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
1775 return NF_ACCEPT;
1776
developer471f6562021-05-10 20:48:34 +08001777 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08001778 if (entry_hnat_is_bound(entry))
1779 return NF_ACCEPT;
1780
1781 if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
1782 return NF_ACCEPT;
1783
1784 eth = eth_hdr(skb);
developerbc552cc2022-03-15 16:19:27 +08001785 memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
developer8116b0a2021-08-23 18:07:20 +08001786
1787 /* do not bind multicast flows if PPE mcast support is not enabled */
developerfdfe1572021-09-13 16:56:33 +08001788 if (!hnat_priv->data->mcast) {
1789 if (is_multicast_ether_addr(eth->h_dest))
1790 return NF_ACCEPT;
1791
1792 if (IS_IPV4_GRP(entry))
1793 entry->ipv4_hnapt.iblk2.mcast = 0;
1794 else
1795 entry->ipv6_5t_route.iblk2.mcast = 0;
1796 }
developerfd40db22021-04-29 10:08:25 +08001797
1798 /* Some mt_wifi virtual interfaces, such as apcli,
1799 * will change the smac for special purposes.
1800 */
developer5ffc5f12022-10-25 18:51:46 +08001801 switch ((int)bfib1_tx.pkt_type) {
developerfd40db22021-04-29 10:08:25 +08001802 case IPV4_HNAPT:
1803 case IPV4_HNAT:
1804 entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
1805 entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1806 break;
1807 case IPV4_DSLITE:
1808 case IPV4_MAP_E:
1809 case IPV6_6RD:
1810 case IPV6_5T_ROUTE:
1811 case IPV6_3T_ROUTE:
developer5ffc5f12022-10-25 18:51:46 +08001812 case IPV6_HNAPT:
1813 case IPV6_HNAT:
developerfd40db22021-04-29 10:08:25 +08001814 entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
1815 entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1816 break;
1817 }
1818
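	/* Mirror the VLAN tag carried in the skb into the entry so the PPE
	 * re-inserts it on egress; clear the VLAN fields otherwise.
	 */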
developer0ff76882021-10-26 10:54:13 +08001819 if (skb->vlan_tci) {
developerbc552cc2022-03-15 16:19:27 +08001820 bfib1_tx.vlan_layer = 1;
1821 bfib1_tx.vpm = 1;
developer0ff76882021-10-26 10:54:13 +08001822 if (IS_IPV4_GRP(entry)) {
1823 entry->ipv4_hnapt.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001824 entry->ipv4_hnapt.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001825 } else if (IS_IPV6_GRP(entry)) {
1826 entry->ipv6_5t_route.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001827 entry->ipv6_5t_route.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001828 }
1829 } else {
developerbc552cc2022-03-15 16:19:27 +08001830 bfib1_tx.vpm = 0;
1831 bfib1_tx.vlan_layer = 0;
developer0ff76882021-10-26 10:54:13 +08001832 }
developer60e60962021-06-15 21:05:07 +08001833
developerfd40db22021-04-29 10:08:25 +08001834 /* MT7622 wifi hw_nat does not support QoS */
1835 if (IS_IPV4_GRP(entry)) {
1836 entry->ipv4_hnapt.iblk2.fqos = 0;
developer4164cfe2022-12-01 11:27:41 +08001837 if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
developere567ad32021-05-25 17:16:17 +08001838 gmac_no == NR_WHNAT_WDMA_PORT) ||
developer4164cfe2022-12-01 11:27:41 +08001839 ((hnat_priv->data->version == MTK_HNAT_V2 ||
1840 hnat_priv->data->version == MTK_HNAT_V3) &&
developere567ad32021-05-25 17:16:17 +08001841 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001842 entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1843 entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001844#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer47545a32022-11-15 16:06:58 +08001845 entry->ipv4_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developerd35bbcc2022-09-28 22:46:01 +08001846 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1847 entry->ipv4_hnapt.iblk2.winfoi = 1;
1848 entry->ipv4_hnapt.winfo_pao.usr_info =
1849 skb_hnat_usr_info(skb);
1850 entry->ipv4_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1851 entry->ipv4_hnapt.winfo_pao.is_fixedrate =
1852 skb_hnat_is_fixedrate(skb);
1853 entry->ipv4_hnapt.winfo_pao.is_prior =
1854 skb_hnat_is_prior(skb);
1855 entry->ipv4_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1856 entry->ipv4_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1857 entry->ipv4_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
1858#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
developerfd40db22021-04-29 10:08:25 +08001859 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1860 entry->ipv4_hnapt.iblk2.winfoi = 1;
1861#else
1862 entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
1863 entry->ipv4_hnapt.iblk2w.winfoi = 1;
1864 entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1865#endif
1866 } else {
1867 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001868 bfib1_tx.vpm = 1;
1869 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001870
developerd35bbcc2022-09-28 22:46:01 +08001871 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001872 entry->ipv4_hnapt.vlan1 = 1;
1873 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1874 entry->ipv4_hnapt.vlan1 = 2;
1875 }
1876
developer34028fb2022-01-11 13:51:29 +08001877 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001878 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001879 bfib1_tx.vpm = 0;
1880 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001881 entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
1882 entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
1883 entry->ipv4_hnapt.iblk2.fqos = 1;
1884 }
developerfd40db22021-04-29 10:08:25 +08001885 }
1886 entry->ipv4_hnapt.iblk2.dp = gmac_no;
developer5ffc5f12022-10-25 18:51:46 +08001887#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1888 } else if (IS_IPV6_HNAPT(entry) || IS_IPV6_HNAT(entry)) {
1889 entry->ipv6_hnapt.iblk2.dp = gmac_no;
1890 entry->ipv6_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1891 entry->ipv6_hnapt.iblk2.winfoi = 1;
1892
1893 entry->ipv6_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1894 entry->ipv6_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
1895 entry->ipv6_hnapt.winfo_pao.usr_info = skb_hnat_usr_info(skb);
1896 entry->ipv6_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1897 entry->ipv6_hnapt.winfo_pao.is_fixedrate =
1898 skb_hnat_is_fixedrate(skb);
1899 entry->ipv6_hnapt.winfo_pao.is_prior = skb_hnat_is_prior(skb);
1900 entry->ipv6_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1901 entry->ipv6_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1902 entry->ipv6_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
developer47545a32022-11-15 16:06:58 +08001903 entry->ipv6_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developer5ffc5f12022-10-25 18:51:46 +08001904#endif
developerfd40db22021-04-29 10:08:25 +08001905 } else {
1906 entry->ipv6_5t_route.iblk2.fqos = 0;
developer4164cfe2022-12-01 11:27:41 +08001907 if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
developere567ad32021-05-25 17:16:17 +08001908 gmac_no == NR_WHNAT_WDMA_PORT) ||
developer4164cfe2022-12-01 11:27:41 +08001909 ((hnat_priv->data->version == MTK_HNAT_V2 ||
1910 hnat_priv->data->version == MTK_HNAT_V3) &&
developere567ad32021-05-25 17:16:17 +08001911 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001912 entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
1913 entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001914#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer47545a32022-11-15 16:06:58 +08001915 entry->ipv6_5t_route.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developerfd40db22021-04-29 10:08:25 +08001916 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1917 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerd35bbcc2022-09-28 22:46:01 +08001918 entry->ipv6_5t_route.winfo_pao.usr_info =
1919 skb_hnat_usr_info(skb);
1920 entry->ipv6_5t_route.winfo_pao.tid =
1921 skb_hnat_tid(skb);
1922 entry->ipv6_5t_route.winfo_pao.is_fixedrate =
1923 skb_hnat_is_fixedrate(skb);
1924 entry->ipv6_5t_route.winfo_pao.is_prior =
1925 skb_hnat_is_prior(skb);
1926 entry->ipv6_5t_route.winfo_pao.is_sp =
1927 skb_hnat_is_sp(skb);
1928 entry->ipv6_5t_route.winfo_pao.hf =
1929 skb_hnat_hf(skb);
1930 entry->ipv6_5t_route.winfo_pao.amsdu =
1931 skb_hnat_amsdu(skb);
1932#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
1933 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1934 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerfd40db22021-04-29 10:08:25 +08001935#else
1936 entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
1937 entry->ipv6_5t_route.iblk2w.winfoi = 1;
1938 entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1939#endif
1940 } else {
1941 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001942 bfib1_tx.vpm = 1;
1943 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001944
developerd35bbcc2022-09-28 22:46:01 +08001945 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001946 entry->ipv6_5t_route.vlan1 = 1;
1947 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1948 entry->ipv6_5t_route.vlan1 = 2;
1949 }
1950
developer34028fb2022-01-11 13:51:29 +08001951 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001952 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001953 bfib1_tx.vpm = 0;
1954 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001955 entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
1956 entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
1957 entry->ipv6_5t_route.iblk2.fqos = 1;
1958 }
developerfd40db22021-04-29 10:08:25 +08001959 }
1960 entry->ipv6_5t_route.iblk2.dp = gmac_no;
1961 }
1962
developer7b36dca2022-05-19 18:29:10 +08001963 bfib1_tx.ttl = 1;
developerbc552cc2022-03-15 16:19:27 +08001964 bfib1_tx.state = BIND;
1965 wmb();
1966 memcpy(&entry->bfib1, &bfib1_tx, sizeof(bfib1_tx));
developerfd40db22021-04-29 10:08:25 +08001967
1968 return NF_ACCEPT;
1969}
1970
1971int mtk_sw_nat_hook_rx(struct sk_buff *skb)
1972{
developer99506e52021-06-30 22:03:02 +08001973 if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
1974 skb_hnat_magic_tag(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001975 return NF_ACCEPT;
developer99506e52021-06-30 22:03:02 +08001976 }
developerfd40db22021-04-29 10:08:25 +08001977
1978 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001979 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001980 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1981
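	/* Record the WDMA source port for the WED ring this packet arrived on. */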
1982 if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
1983 skb_hnat_sport(skb) = NR_WDMA0_PORT;
1984 else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
1985 skb_hnat_sport(skb) = NR_WDMA1_PORT;
1986
1987 return NF_ACCEPT;
1988}
1989
1990void mtk_ppe_dev_register_hook(struct net_device *dev)
1991{
1992 int i, number = 0;
1993 struct extdev_entry *ext_entry;
1994
developerfd40db22021-04-29 10:08:25 +08001995 for (i = 1; i < MAX_IF_NUM; i++) {
1996 if (hnat_priv->wifi_hook_if[i] == dev) {
1997 pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
1998 __func__, dev->name, i);
1999 return;
2000 }
developera7e6c242022-12-05 13:52:40 +08002001 }
2002
2003 for (i = 1; i < MAX_IF_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002004 if (!hnat_priv->wifi_hook_if[i]) {
2005 if (find_extif_from_devname(dev->name)) {
2006 extif_set_dev(dev);
2007 goto add_wifi_hook_if;
2008 }
2009
2010 number = get_ext_device_number();
2011 if (number >= MAX_EXT_DEVS) {
2012 pr_info("%s : extdev array is full. %s is not registered\n",
2013 __func__, dev->name);
2014 return;
2015 }
2016
2017 ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
2018 if (!ext_entry)
2019 return;
2020
developer4c32b7a2021-11-13 16:46:43 +08002021 strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
developerfd40db22021-04-29 10:08:25 +08002022 dev_hold(dev);
2023 ext_entry->dev = dev;
2024 ext_if_add(ext_entry);
2025
2026add_wifi_hook_if:
2027 dev_hold(dev);
2028 hnat_priv->wifi_hook_if[i] = dev;
2029
2030 break;
2031 }
2032 }
2033 pr_info("%s : interface %s register (%d)\n", __func__, dev->name, i);
2034}
2035
2036void mtk_ppe_dev_unregister_hook(struct net_device *dev)
2037{
2038 int i;
2039
2040 for (i = 1; i < MAX_IF_NUM; i++) {
2041 if (hnat_priv->wifi_hook_if[i] == dev) {
2042 hnat_priv->wifi_hook_if[i] = NULL;
2043 dev_put(dev);
2044
2045 break;
2046 }
2047 }
2048
2049 extif_put_dev(dev);
2050 pr_info("%s : interface %s set null (%d)\n", __func__, dev->name, i);
2051}
2052
2053static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
2054{
2055 struct dst_entry *dst;
2056 struct nf_conn *ct;
2057 enum ip_conntrack_info ctinfo;
2058 const struct nf_conn_help *help;
2059
2060 /* Do not accelerate the 1st round of an xfrm flow; the 2nd round comes
2061 * from local_out, which is also filtered in the sanity check.
2062 */
2063 dst = skb_dst(skb);
2064 if (dst && dst_xfrm(dst))
2065 return 0;
2066
2067 ct = nf_ct_get(skb, &ctinfo);
2068 if (!ct)
2069 return 1;
2070
2071 /* rcu_read_lock()ed by nf_hook_slow */
2072 help = nfct_help(ct);
2073 if (help && rcu_dereference(help->helper))
2074 return 0;
2075
2076 return 1;
2077}
2078
developer6f4a0c72021-10-19 10:04:22 +08002079static void mtk_hnat_dscp_update(struct sk_buff *skb, struct foe_entry *entry)
2080{
2081 struct iphdr *iph;
2082 struct ethhdr *eth;
2083 struct ipv6hdr *ip6h;
2084 bool flag = false;
2085
2086 eth = eth_hdr(skb);
2087 switch (ntohs(eth->h_proto)) {
2088 case ETH_P_IP:
2089 iph = ip_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002090 if (IS_IPV4_GRP(entry) && entry->ipv4_hnapt.iblk2.dscp != iph->tos)
developer6f4a0c72021-10-19 10:04:22 +08002091 flag = true;
2092 break;
2093 case ETH_P_IPV6:
2094 ip6h = ipv6_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002095 if ((IS_IPV6_3T_ROUTE(entry) || IS_IPV6_5T_ROUTE(entry)) &&
2096 (entry->ipv6_5t_route.iblk2.dscp !=
2097 (ip6h->priority << 4 | (ip6h->flow_lbl[0] >> 4))))
developer6f4a0c72021-10-19 10:04:22 +08002098 flag = true;
2099 break;
2100 default:
2101 return;
2102 }
2103
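	/* DSCP changed for a bound flow: clear the entry and flush the PPE
	 * cache so the flow is re-learned with the updated DSCP.
	 */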
2104 if (flag) {
developer1080dd82022-03-07 19:31:04 +08002105 if (debug_level >= 2)
2106 pr_info("Delete entry idx=%d.\n", skb_hnat_entry(skb));
developer6f4a0c72021-10-19 10:04:22 +08002107 memset(entry, 0, sizeof(struct foe_entry));
2108 hnat_cache_ebl(1);
2109 }
2110}
2111
developer30a47682021-11-02 17:06:14 +08002112static void mtk_hnat_nf_update(struct sk_buff *skb)
2113{
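	/* Fold the hw per-flow counter delta for this entry into the
	 * nf_conntrack accounting of the owning connection.
	 */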
2114 struct nf_conn *ct;
2115 struct nf_conn_acct *acct;
2116 struct nf_conn_counter *counter;
2117 enum ip_conntrack_info ctinfo;
2118 struct hnat_accounting diff;
2119
2120 ct = nf_ct_get(skb, &ctinfo);
2121 if (ct) {
2122 if (!hnat_get_count(hnat_priv, skb_hnat_ppe(skb), skb_hnat_entry(skb), &diff))
2123 return;
2124
2125 acct = nf_conn_acct_find(ct);
2126 if (acct) {
2127 counter = acct->counter;
2128 atomic64_add(diff.packets, &counter[CTINFO2DIR(ctinfo)].packets);
2129 atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes);
2130 }
2131 }
developere8b7dfa2023-04-20 10:16:44 +08002132}
2133
2134int mtk_464xlat_fill_mac(struct foe_entry *entry, struct sk_buff *skb,
2135 const struct net_device *out, bool l2w)
2136{
2137 const struct in6_addr *ipv6_nexthop;
2138 struct dst_entry *dst = skb_dst(skb);
2139 struct neighbour *neigh = NULL;
2140 struct rtable *rt = (struct rtable *)dst;
2141 u32 nexthop;
2142
2143 rcu_read_lock_bh();
2144 if (l2w) {
2145 ipv6_nexthop = rt6_nexthop((struct rt6_info *)dst,
2146 &ipv6_hdr(skb)->daddr);
2147 neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
2148 if (unlikely(!neigh)) {
2149 dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n",
2150 __func__, &ipv6_hdr(skb)->daddr);
2151 rcu_read_unlock_bh();
2152 return -1;
2153 }
2154 } else {
2155 nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
2156 neigh = __ipv4_neigh_lookup_noref(dst->dev, nexthop);
2157 if (unlikely(!neigh)) {
2158 dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n",
2159 __func__, &ip_hdr(skb)->daddr);
2160 rcu_read_unlock_bh();
2161 return -1;
2162 }
2163 }
2164 rcu_read_unlock_bh();
2165
2166 entry->ipv4_dslite.dmac_hi = swab32(*((u32 *)neigh->ha));
2167 entry->ipv4_dslite.dmac_lo = swab16(*((u16 *)&neigh->ha[4]));
2168 entry->ipv4_dslite.smac_hi = swab32(*((u32 *)out->dev_addr));
2169 entry->ipv4_dslite.smac_lo = swab16(*((u16 *)&out->dev_addr[4]));
2170
2171 return 0;
2172}
2173
2174int mtk_464xlat_get_hash(struct sk_buff *skb, u32 *hash, bool l2w)
2175{
2176 struct in6_addr addr_v6, prefix;
2177 struct ipv6hdr *ip6h;
2178 struct iphdr *iph;
2179 struct tcpudphdr *pptr, _ports;
2180 struct foe_entry tmp;
2181 u32 addr, protoff;
2182
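	/* Build a scratch FOE entry describing the translated 5-tuple and use
	 * it to compute the PPE hash of the corresponding hw entry.
	 */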
2183 if (l2w) {
2184 ip6h = ipv6_hdr(skb);
2185 if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
2186 return -1;
2187 protoff = IPV6_HDR_LEN;
2188
2189 tmp.bfib1.pkt_type = IPV4_HNAPT;
2190 tmp.ipv4_hnapt.sip = ntohl(ip6h->saddr.s6_addr32[3]);
2191 tmp.ipv4_hnapt.dip = ntohl(addr);
2192 } else {
2193 iph = ip_hdr(skb);
2194 if (mtk_ppe_get_xlat_v6_by_v4(&iph->saddr, &addr_v6, &prefix))
2195 return -1;
2196
2197 protoff = iph->ihl * 4;
2198
2199 tmp.bfib1.pkt_type = IPV6_5T_ROUTE;
2200 tmp.ipv6_5t_route.ipv6_sip0 = ntohl(addr_v6.s6_addr32[0]);
2201 tmp.ipv6_5t_route.ipv6_sip1 = ntohl(addr_v6.s6_addr32[1]);
2202 tmp.ipv6_5t_route.ipv6_sip2 = ntohl(addr_v6.s6_addr32[2]);
2203 tmp.ipv6_5t_route.ipv6_sip3 = ntohl(addr_v6.s6_addr32[3]);
2204 tmp.ipv6_5t_route.ipv6_dip0 = ntohl(prefix.s6_addr32[0]);
2205 tmp.ipv6_5t_route.ipv6_dip1 = ntohl(prefix.s6_addr32[1]);
2206 tmp.ipv6_5t_route.ipv6_dip2 = ntohl(prefix.s6_addr32[2]);
2207 tmp.ipv6_5t_route.ipv6_dip3 = ntohl(iph->daddr);
2208 }
2209
2210 pptr = skb_header_pointer(skb, protoff,
2211 sizeof(_ports), &_ports);
2212 if (unlikely(!pptr))
2213 return -1;
2214
2215 if (l2w) {
2216 tmp.ipv4_hnapt.sport = ntohs(pptr->src);
2217 tmp.ipv4_hnapt.dport = ntohs(pptr->dst);
2218 } else {
2219 tmp.ipv6_5t_route.sport = ntohs(pptr->src);
2220 tmp.ipv6_5t_route.dport = ntohs(pptr->dst);
2221 }
2222
2223 *hash = hnat_get_ppe_hash(&tmp);
2224
2225 return 0;
2226}
2227
2228void mtk_464xlat_fill_info1(struct foe_entry *entry,
2229 struct sk_buff *skb, bool l2w)
2230{
2231 entry->bfib1.cah = 1;
2232 entry->bfib1.ttl = 1;
2233 entry->bfib1.state = BIND;
2234 entry->bfib1.time_stamp = readl(hnat_priv->fe_base + 0x0010) & (0xFF);
2235 if (l2w) {
2236 entry->bfib1.pkt_type = IPV4_DSLITE;
2237 entry->bfib1.udp = ipv6_hdr(skb)->nexthdr ==
2238 IPPROTO_UDP ? 1 : 0;
2239 } else {
2240 entry->bfib1.pkt_type = IPV6_6RD;
2241 entry->bfib1.udp = ip_hdr(skb)->protocol ==
2242 IPPROTO_UDP ? 1 : 0;
2243 }
2244}
2245
2246void mtk_464xlat_fill_info2(struct foe_entry *entry, bool l2w)
2247{
2248 entry->ipv4_dslite.iblk2.mibf = 1;
2249 entry->ipv4_dslite.iblk2.port_ag = 0xF;
2250
2251 if (l2w)
2252 entry->ipv4_dslite.iblk2.dp = NR_GMAC2_PORT;
2253 else
2254 entry->ipv6_6rd.iblk2.dp = NR_GMAC1_PORT;
2255}
2256
2257void mtk_464xlat_fill_ipv4(struct foe_entry *entry, struct sk_buff *skb,
2258 struct foe_entry *foe, bool l2w)
2259{
2260 struct iphdr *iph;
2261
2262 if (l2w) {
2263 entry->ipv4_dslite.sip = foe->ipv4_dslite.sip;
2264 entry->ipv4_dslite.dip = foe->ipv4_dslite.dip;
2265 entry->ipv4_dslite.sport = foe->ipv4_dslite.sport;
2266 entry->ipv4_dslite.dport = foe->ipv4_dslite.dport;
2267 } else {
2268 iph = ip_hdr(skb);
2269 entry->ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
2270 entry->ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
2271 entry->ipv6_6rd.sport = foe->ipv6_6rd.sport;
2272 entry->ipv6_6rd.dport = foe->ipv6_6rd.dport;
2273 entry->ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
2274 entry->ipv6_6rd.ttl = iph->ttl;
2275 entry->ipv6_6rd.dscp = iph->tos;
2276 entry->ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
2277 }
2278}
2279
2280int mtk_464xlat_fill_ipv6(struct foe_entry *entry, struct sk_buff *skb,
2281 struct foe_entry *foe, bool l2w)
2282{
2283 struct ipv6hdr *ip6h;
2284 struct in6_addr addr_v6, prefix;
2285 u32 addr;
2286
2287 if (l2w) {
2288 ip6h = ipv6_hdr(skb);
2289
2290 if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
2291 return -1;
2292
2293 if (mtk_ppe_get_xlat_v6_by_v4(&addr, &addr_v6, &prefix))
2294 return -1;
2295
2296 entry->ipv4_dslite.tunnel_sipv6_0 =
2297 ntohl(prefix.s6_addr32[0]);
2298 entry->ipv4_dslite.tunnel_sipv6_1 =
2299 ntohl(ip6h->saddr.s6_addr32[1]);
2300 entry->ipv4_dslite.tunnel_sipv6_2 =
2301 ntohl(ip6h->saddr.s6_addr32[2]);
2302 entry->ipv4_dslite.tunnel_sipv6_3 =
2303 ntohl(ip6h->saddr.s6_addr32[3]);
2304 entry->ipv4_dslite.tunnel_dipv6_0 =
2305 ntohl(ip6h->daddr.s6_addr32[0]);
2306 entry->ipv4_dslite.tunnel_dipv6_1 =
2307 ntohl(ip6h->daddr.s6_addr32[1]);
2308 entry->ipv4_dslite.tunnel_dipv6_2 =
2309 ntohl(ip6h->daddr.s6_addr32[2]);
2310 entry->ipv4_dslite.tunnel_dipv6_3 =
2311 ntohl(ip6h->daddr.s6_addr32[3]);
2312
2313 ppe_fill_flow_lbl(entry, ip6h);
2314 entry->ipv4_dslite.priority = ip6h->priority;
2315 entry->ipv4_dslite.hop_limit = ip6h->hop_limit;
2316
2317 } else {
2318 entry->ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
2319 entry->ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
2320 entry->ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
2321 entry->ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
2322 entry->ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
2323 entry->ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
2324 entry->ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
2325 entry->ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
2326 }
2327
2328 return 0;
2329}
2330
2331int mtk_464xlat_fill_l2(struct foe_entry *entry, struct sk_buff *skb,
2332 const struct net_device *dev, bool l2w)
2333{
2334 const unsigned int *port_reg;
2335 int port_index;
2336 u16 sp_tag;
2337
2338 if (l2w)
2339 entry->ipv4_dslite.etype = ETH_P_IP;
2340 else {
2341 if (IS_DSA_LAN(dev)) {
2342 port_reg = of_get_property(dev->dev.of_node,
2343 "reg", NULL);
2344 if (unlikely(!port_reg))
2345 return -1;
2346
2347 port_index = be32_to_cpup(port_reg);
2348 sp_tag = BIT(port_index);
2349
2350 entry->bfib1.vlan_layer = 1;
2351 entry->bfib1.vpm = 0;
2352 entry->ipv6_6rd.etype = sp_tag;
2353 } else
2354 entry->ipv6_6rd.etype = ETH_P_IPV6;
2355 }
2356
2357 if (mtk_464xlat_fill_mac(entry, skb, dev, l2w))
2358 return -1;
2359
2360 return 0;
developer30a47682021-11-02 17:06:14 +08002361}
2362
developere8b7dfa2023-04-20 10:16:44 +08002363
2364int mtk_464xlat_fill_l3(struct foe_entry *entry, struct sk_buff *skb,
2365 struct foe_entry *foe, bool l2w)
2366{
2367 mtk_464xlat_fill_ipv4(entry, skb, foe, l2w);
2368
2369 if (mtk_464xlat_fill_ipv6(entry, skb, foe, l2w))
2370 return -1;
2371
2372 return 0;
2373}
2374
2375int mtk_464xlat_post_process(struct sk_buff *skb, const struct net_device *out)
2376{
2377 struct foe_entry *foe, entry = {};
2378 u32 hash;
2379 bool l2w;
2380
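	/* Decide the translation direction from the L3 protocol, locate the
	 * unbound FOE entry by hash, then fill and bind it in place.
	 */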
2381 if (skb->protocol == htons(ETH_P_IPV6))
2382 l2w = true;
2383 else if (skb->protocol == htons(ETH_P_IP))
2384 l2w = false;
2385 else
2386 return -1;
2387
2388 if (mtk_464xlat_get_hash(skb, &hash, l2w))
2389 return -1;
2390
2391 if (hash >= hnat_priv->foe_etry_num)
2392 return -1;
2393
2394 if (headroom[hash].crsn != HIT_UNBIND_RATE_REACH)
2395 return -1;
2396
2397 foe = &hnat_priv->foe_table_cpu[headroom_ppe(headroom[hash])][hash];
2398
2399 mtk_464xlat_fill_info1(&entry, skb, l2w);
2400
2401 if (mtk_464xlat_fill_l3(&entry, skb, foe, l2w))
2402 return -1;
2403
2404 mtk_464xlat_fill_info2(&entry, l2w);
2405
2406 if (mtk_464xlat_fill_l2(&entry, skb, out, l2w))
2407 return -1;
2408
2409 /* We must ensure all info has been updated before the entry is written to hw */
2410 wmb();
2411 memcpy(foe, &entry, sizeof(struct foe_entry));
2412
2413 return 0;
2414}
2415
developerfd40db22021-04-29 10:08:25 +08002416static unsigned int mtk_hnat_nf_post_routing(
2417 struct sk_buff *skb, const struct net_device *out,
2418 unsigned int (*fn)(struct sk_buff *, const struct net_device *,
2419 struct flow_offload_hw_path *),
2420 const char *func)
2421{
2422 struct foe_entry *entry;
2423 struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
developere5763512021-05-21 01:04:58 +08002424 .virt_dev = (struct net_device*)out };
developerfd40db22021-04-29 10:08:25 +08002425 const struct net_device *arp_dev = out;
2426
developere8b7dfa2023-04-20 10:16:44 +08002427 if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
2428 return 0;
2429
developerfd40db22021-04-29 10:08:25 +08002430 if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
2431 !IS_SPACE_AVAILABLE_HEAD(skb)))
2432 return 0;
2433
developerc0419aa2022-12-07 15:56:36 +08002434 if (unlikely(!skb_mac_header_was_set(skb)))
2435 return 0;
2436
developerfd40db22021-04-29 10:08:25 +08002437 if (unlikely(!skb_hnat_is_hashed(skb)))
2438 return 0;
2439
2440 if (out->netdev_ops->ndo_flow_offload_check) {
developere5763512021-05-21 01:04:58 +08002441 out->netdev_ops->ndo_flow_offload_check(&hw_path);
developerfd40db22021-04-29 10:08:25 +08002442 out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
2443 }
2444
developerd35bbcc2022-09-28 22:46:01 +08002445 if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
developerfd40db22021-04-29 10:08:25 +08002446 return 0;
2447
2448 trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
2449 skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
2450
developer577ad2f2022-11-28 10:33:36 +08002451 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2452 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2453 return -1;
2454
developer471f6562021-05-10 20:48:34 +08002455 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002456
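	/* HIT_UNBIND_RATE_REACH: learn the flow and fill a new entry.
	 * HIT_BIND_KEEPALIVE_DUP_OLD_HDR: refresh counters/DSCP/mcast timestamp
	 * for an already bound flow.
	 * HIT_BIND_MULTICAST_*: the PPE has already forwarded the packet.
	 */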
2457 switch (skb_hnat_reason(skb)) {
2458 case HIT_UNBIND_RATE_REACH:
2459 if (entry_hnat_is_bound(entry))
2460 break;
2461
2462 if (fn && !mtk_hnat_accel_type(skb))
2463 break;
2464
2465 if (fn && fn(skb, arp_dev, &hw_path))
2466 break;
2467
2468 skb_to_hnat_info(skb, out, entry, &hw_path);
2469 break;
2470 case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
developer30a47682021-11-02 17:06:14 +08002471 /* update hnat count to nf_conntrack by keepalive */
2472 if (hnat_priv->data->per_flow_accounting && hnat_priv->nf_stat_en)
2473 mtk_hnat_nf_update(skb);
2474
developerfd40db22021-04-29 10:08:25 +08002475 if (fn && !mtk_hnat_accel_type(skb))
2476 break;
2477
developer6f4a0c72021-10-19 10:04:22 +08002478 /* update dscp for qos */
2479 mtk_hnat_dscp_update(skb, entry);
2480
developerfd40db22021-04-29 10:08:25 +08002481 /* update mcast timestamp*/
developer4164cfe2022-12-01 11:27:41 +08002482 if (hnat_priv->data->version == MTK_HNAT_V1_3 &&
developerfd40db22021-04-29 10:08:25 +08002483 hnat_priv->data->mcast && entry->bfib1.sta == 1)
2484 entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
2485
2486 if (entry_hnat_is_bound(entry)) {
2487 memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
2488
2489 return -1;
2490 }
2491 break;
2492 case HIT_BIND_MULTICAST_TO_CPU:
2493 case HIT_BIND_MULTICAST_TO_GMAC_CPU:
2494 /* do not forward to gdma again if the ppe has already done it */
developerd35bbcc2022-09-28 22:46:01 +08002495 if (IS_LAN_GRP(out) || IS_WAN(out))
developerfd40db22021-04-29 10:08:25 +08002496 return -1;
2497 break;
2498 }
2499
2500 return 0;
2501}
2502
2503static unsigned int
2504mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
2505 const struct nf_hook_state *state)
2506{
2507 struct foe_entry *entry;
2508 struct ipv6hdr *ip6h;
2509 struct iphdr _iphdr;
2510 const struct iphdr *iph;
2511 struct tcpudphdr _ports;
2512 const struct tcpudphdr *pptr;
2513 int udp = 0;
2514
2515 if (unlikely(!skb_hnat_is_hashed(skb)))
2516 return NF_ACCEPT;
2517
developer577ad2f2022-11-28 10:33:36 +08002518 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2519 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2520 return NF_ACCEPT;
2521
developer471f6562021-05-10 20:48:34 +08002522 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002523 if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
2524 ip6h = ipv6_hdr(skb);
2525 if (ip6h->nexthdr == NEXTHDR_IPIP) {
2526 /* Map-E LAN->WAN: need to record orig info before fn. */
2527 if (mape_toggle) {
2528 iph = skb_header_pointer(skb, IPV6_HDR_LEN,
2529 sizeof(_iphdr), &_iphdr);
developer4c32b7a2021-11-13 16:46:43 +08002530 if (unlikely(!iph))
2531 return NF_ACCEPT;
2532
developerfd40db22021-04-29 10:08:25 +08002533 switch (iph->protocol) {
2534 case IPPROTO_UDP:
2535 udp = 1;
2536 case IPPROTO_TCP:
2537 break;
2538
2539 default:
2540 return NF_ACCEPT;
2541 }
2542
2543 pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
2544 sizeof(_ports), &_ports);
developer4c32b7a2021-11-13 16:46:43 +08002545 if (unlikely(!pptr))
2546 return NF_ACCEPT;
2547
developerfd40db22021-04-29 10:08:25 +08002548 entry->bfib1.udp = udp;
2549
developer25fc8c02022-05-06 16:24:02 +08002550 /* Map-E LAN->WAN: record the inner IPv4 header info. */
developerd35bbcc2022-09-28 22:46:01 +08002551#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08002552 entry->bfib1.pkt_type = IPV4_MAP_E;
2553 entry->ipv4_dslite.iblk2.dscp = iph->tos;
developerd35bbcc2022-09-28 22:46:01 +08002554 entry->ipv4_mape.new_sip = ntohl(iph->saddr);
2555 entry->ipv4_mape.new_dip = ntohl(iph->daddr);
2556 entry->ipv4_mape.new_sport = ntohs(pptr->src);
2557 entry->ipv4_mape.new_dport = ntohs(pptr->dst);
developerfd40db22021-04-29 10:08:25 +08002558#else
2559 entry->ipv4_hnapt.iblk2.dscp = iph->tos;
2560 entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
2561 entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
2562 entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
2563 entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
2564#endif
2565 } else {
2566 entry->bfib1.pkt_type = IPV4_DSLITE;
2567 }
2568 }
2569 }
2570 return NF_ACCEPT;
2571}
2572
2573static unsigned int
2574mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
2575 const struct nf_hook_state *state)
2576{
developer577ad2f2022-11-28 10:33:36 +08002577 if (!skb)
2578 goto drop;
2579
developerfd40db22021-04-29 10:08:25 +08002580 post_routing_print(skb, state->in, state->out, __func__);
2581
2582 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
2583 __func__))
2584 return NF_ACCEPT;
2585
developer577ad2f2022-11-28 10:33:36 +08002586drop:
2587 if (skb)
2588 trace_printk(
2589 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2590 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2591 __func__, skb_hnat_iface(skb), state->out->name,
2592 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2593 skb_hnat_sport(skb), skb_hnat_reason(skb),
2594 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002595
2596 return NF_DROP;
2597}
2598
2599static unsigned int
2600mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
2601 const struct nf_hook_state *state)
2602{
developer577ad2f2022-11-28 10:33:36 +08002603 if (!skb)
2604 goto drop;
2605
developerfd40db22021-04-29 10:08:25 +08002606 post_routing_print(skb, state->in, state->out, __func__);
2607
2608 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
2609 __func__))
2610 return NF_ACCEPT;
2611
developer577ad2f2022-11-28 10:33:36 +08002612drop:
2613 if (skb)
2614 trace_printk(
2615 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2616 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2617 __func__, skb_hnat_iface(skb), state->out->name,
2618 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2619 skb_hnat_sport(skb), skb_hnat_reason(skb),
2620 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002621
2622 return NF_DROP;
2623}
2624
2625static unsigned int
2626mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
2627 const struct nf_hook_state *state)
2628{
developer659fdeb2022-12-01 23:03:07 +08002629 struct vlan_ethhdr *veth;
2630
2631 if (!skb)
2632 goto drop;
2633
2634 veth = (struct vlan_ethhdr *)skb_mac_header(skb);
developerfd40db22021-04-29 10:08:25 +08002635
developer34028fb2022-01-11 13:51:29 +08002636 if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
developerfd40db22021-04-29 10:08:25 +08002637 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2638 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2639 }
developerfd40db22021-04-29 10:08:25 +08002640
2641 if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
2642 clr_from_extge(skb);
2643
2644 /* packets from external devices -> xxx, step 2, learning stage */
developeraf07fad2021-11-19 17:53:42 +08002645 if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
2646 (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
developerfd40db22021-04-29 10:08:25 +08002647 if (!do_hnat_ext_to_ge2(skb, __func__))
2648 return NF_STOLEN;
2649 goto drop;
2650 }
2651
2652 /* packets from ge -> external device */
2653 if (do_ge2ext_fast(state->in, skb)) {
2654 if (!do_hnat_ge_to_ext(skb, __func__))
2655 return NF_STOLEN;
2656 goto drop;
2657 }
2658
2659 return NF_ACCEPT;
developer577ad2f2022-11-28 10:33:36 +08002660
developerfd40db22021-04-29 10:08:25 +08002661drop:
developer577ad2f2022-11-28 10:33:36 +08002662 if (skb)
2663 printk_ratelimited(KERN_WARNING
2664 "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
2665 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2666 __func__, state->in->name, skb_hnat_iface(skb),
2667 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2668 skb_hnat_sport(skb), skb_hnat_reason(skb),
2669 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002670
2671 return NF_DROP;
2672}
2673
2674static unsigned int
2675mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
2676 const struct nf_hook_state *state)
2677{
developer577ad2f2022-11-28 10:33:36 +08002678 if (!skb)
2679 goto drop;
2680
developerfd40db22021-04-29 10:08:25 +08002681 post_routing_print(skb, state->in, state->out, __func__);
2682
2683 if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__))
2684 return NF_ACCEPT;
2685
developer577ad2f2022-11-28 10:33:36 +08002686drop:
2687 if (skb)
2688 trace_printk(
2689 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2690 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2691 __func__, skb_hnat_iface(skb), state->out->name,
2692 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2693 skb_hnat_sport(skb), skb_hnat_reason(skb),
2694 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002695
2696 return NF_DROP;
2697}
2698
2699static unsigned int
2700mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
2701 const struct nf_hook_state *state)
2702{
2703 struct sk_buff *new_skb;
2704 struct foe_entry *entry;
2705 struct iphdr *iph;
2706
2707 if (!skb_hnat_is_hashed(skb))
2708 return NF_ACCEPT;
2709
developer577ad2f2022-11-28 10:33:36 +08002710 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2711 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2712 return NF_ACCEPT;
2713
developer471f6562021-05-10 20:48:34 +08002714 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002715
2716 if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
2717 new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
2718 if (!new_skb) {
2719 dev_info(hnat_priv->dev, "%s:drop\n", __func__);
2720 return NF_DROP;
2721 }
2722 dev_kfree_skb(skb);
2723 skb = new_skb;
2724 }
2725
2726 /* Make sure locally originated flows are not bound. */
2727 iph = ip_hdr(skb);
2728 if (iph->protocol == IPPROTO_IPV6) {
2729 entry->udib1.pkt_type = IPV6_6RD;
2730 hnat_set_head_frags(state, skb, 0, hnat_set_alg);
2731 } else {
2732 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2733 }
2734
2735 return NF_ACCEPT;
2736}
2737
2738static unsigned int mtk_hnat_br_nf_forward(void *priv,
2739 struct sk_buff *skb,
2740 const struct nf_hook_state *state)
2741{
developer4164cfe2022-12-01 11:27:41 +08002742 if ((hnat_priv->data->version == MTK_HNAT_V1_2) &&
developer99506e52021-06-30 22:03:02 +08002743 unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
developerfd40db22021-04-29 10:08:25 +08002744 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2745
2746 return NF_ACCEPT;
2747}
2748
2749static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
2750 {
2751 .hook = mtk_hnat_ipv4_nf_pre_routing,
2752 .pf = NFPROTO_IPV4,
2753 .hooknum = NF_INET_PRE_ROUTING,
2754 .priority = NF_IP_PRI_FIRST + 1,
2755 },
2756 {
2757 .hook = mtk_hnat_ipv6_nf_pre_routing,
2758 .pf = NFPROTO_IPV6,
2759 .hooknum = NF_INET_PRE_ROUTING,
2760 .priority = NF_IP_PRI_FIRST + 1,
2761 },
2762 {
2763 .hook = mtk_hnat_ipv6_nf_post_routing,
2764 .pf = NFPROTO_IPV6,
2765 .hooknum = NF_INET_POST_ROUTING,
2766 .priority = NF_IP_PRI_LAST,
2767 },
2768 {
2769 .hook = mtk_hnat_ipv6_nf_local_out,
2770 .pf = NFPROTO_IPV6,
2771 .hooknum = NF_INET_LOCAL_OUT,
2772 .priority = NF_IP_PRI_LAST,
2773 },
2774 {
2775 .hook = mtk_hnat_ipv4_nf_post_routing,
2776 .pf = NFPROTO_IPV4,
2777 .hooknum = NF_INET_POST_ROUTING,
2778 .priority = NF_IP_PRI_LAST,
2779 },
2780 {
2781 .hook = mtk_hnat_ipv4_nf_local_out,
2782 .pf = NFPROTO_IPV4,
2783 .hooknum = NF_INET_LOCAL_OUT,
2784 .priority = NF_IP_PRI_LAST,
2785 },
2786 {
2787 .hook = mtk_hnat_br_nf_local_in,
2788 .pf = NFPROTO_BRIDGE,
2789 .hooknum = NF_BR_LOCAL_IN,
2790 .priority = NF_BR_PRI_FIRST,
2791 },
2792 {
2793 .hook = mtk_hnat_br_nf_local_out,
2794 .pf = NFPROTO_BRIDGE,
2795 .hooknum = NF_BR_LOCAL_OUT,
2796 .priority = NF_BR_PRI_LAST - 1,
2797 },
2798 {
2799 .hook = mtk_pong_hqos_handler,
2800 .pf = NFPROTO_BRIDGE,
2801 .hooknum = NF_BR_PRE_ROUTING,
developer2b85f652021-08-19 16:09:50 +08002802 .priority = NF_BR_PRI_FIRST + 1,
developerfd40db22021-04-29 10:08:25 +08002803 },
2804};
2805
2806int hnat_register_nf_hooks(void)
2807{
2808 return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2809}
2810
2811void hnat_unregister_nf_hooks(void)
2812{
2813 nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2814}
2815
2816int whnat_adjust_nf_hooks(void)
2817{
2818 struct nf_hook_ops *hook = mtk_hnat_nf_ops;
2819 unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);
2820
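	/* Re-map the bridge hooks for the whnat path: local-in learning moves
	 * to pre-routing, local-out moves to post-routing, and the HQoS pong
	 * handler is replaced by the plain bridge-forward handler.
	 */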
developerfd40db22021-04-29 10:08:25 +08002821 while (n-- > 0) {
2822 if (hook[n].hook == mtk_hnat_br_nf_local_in) {
2823 hook[n].hooknum = NF_BR_PRE_ROUTING;
developer2b85f652021-08-19 16:09:50 +08002824 hook[n].priority = NF_BR_PRI_FIRST + 1;
developerfd40db22021-04-29 10:08:25 +08002825 } else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
2826 hook[n].hooknum = NF_BR_POST_ROUTING;
2827 } else if (hook[n].hook == mtk_pong_hqos_handler) {
2828 hook[n].hook = mtk_hnat_br_nf_forward;
2829 hook[n].hooknum = NF_BR_FORWARD;
2830 hook[n].priority = NF_BR_PRI_LAST - 1;
2831 }
2832 }
2833
2834 return 0;
2835}
2836
developerfd40db22021-04-29 10:08:25 +08002837int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
2838 struct packet_type *pt, struct net_device *unused)
2839{
2840 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2841
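	/* Recover the FOE entry index from the HQoS magic VLAN tag and hand
	 * the packet to the GE -> EXT fast path.
	 */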
2842 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2843 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2844
developer659fdeb2022-12-01 23:03:07 +08002845 if (do_hnat_ge_to_ext(skb, __func__) == -1)
2846 return 1;
developerfd40db22021-04-29 10:08:25 +08002847
2848 return 0;
2849}
developerfd40db22021-04-29 10:08:25 +08002850