/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
 */

#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>

#include <net/arp.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>

#include "nf_hnat_mtk.h"
#include "hnat.h"

#include "../mtk_eth_soc.h"
#include "../mtk_eth_reset.h"

#define do_ge2ext_fast(dev, skb) \
	((IS_LAN_GRP(dev) || IS_WAN(dev) || IS_PPD(dev)) && \
	 skb_hnat_is_hashed(skb) && \
	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
#define do_ext2ge_fast_learn(dev, skb) \
	(IS_PPD(dev) && \
	 (skb_hnat_sport(skb) == NR_PDMA_PORT || \
	  skb_hnat_sport(skb) == NR_QDMA_PORT) && \
	 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) || \
	  get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
#define do_mape_w2l_fast(dev, skb) \
	(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))

static struct ipv6hdr mape_l2w_v6h;
static struct ipv6hdr mape_w2l_v6h;

static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
{
	int i;

	for (i = 1; i < MAX_IF_NUM; i++) {
		if (hnat_priv->wifi_hook_if[i] == dev)
			return i;
	}

	return 0;
}

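/* The helpers below walk hnat_priv->ext_if[], the registry of external
 * (e.g. Wi-Fi) interfaces tracked by the HNAT driver, translating between
 * net_device pointers, interface names and ifindex values.
 */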
static inline int get_ext_device_number(void)
{
	int i, number = 0;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++)
		number += 1;
	return number;
}

static inline int find_extif_from_devname(const char *name)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(name, ext_entry->name))
			return 1;
	}
	return 0;
}

static inline int get_index_from_dev(const struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (dev == ext_entry->dev)
			return ext_entry->dev->ifindex;
	}
	return 0;
}

static inline struct net_device *get_dev_from_index(int index)
{
	int i;
	struct extdev_entry *ext_entry;
	struct net_device *dev = 0;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev && index == ext_entry->dev->ifindex) {
			dev = ext_entry->dev;
			break;
		}
	}
	return dev;
}

static inline struct net_device *get_wandev_from_index(int index)
{
	if (!hnat_priv->g_wandev)
		hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

	if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index)
		return hnat_priv->g_wandev;
	return NULL;
}

static inline int extif_set_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
			dev_hold(dev);
			ext_entry->dev = dev;
			pr_info("%s(%s)\n", __func__, dev->name);

			return ext_entry->dev->ifindex;
		}
	}

	return -1;
}

static inline int extif_put_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev == dev) {
			ext_entry->dev = NULL;
			dev_put(dev);
			pr_info("%s(%s)\n", __func__, dev->name);

			return 0;
		}
	}

	return -1;
}

int ext_if_add(struct extdev_entry *ext_entry)
{
	int len = get_ext_device_number();

	if (len < MAX_EXT_DEVS)
		hnat_priv->ext_if[len++] = ext_entry;

	return len;
}

int ext_if_del(struct extdev_entry *ext_entry)
{
	int i, j;

	for (i = 0; i < MAX_EXT_DEVS; i++) {
		if (hnat_priv->ext_if[i] == ext_entry) {
			for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++)
				hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
			hnat_priv->ext_if[j] = NULL;
			break;
		}
	}

	return i;
}

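/* Invalidate every bound FOE entry on each PPE when a relevant interface
 * goes down, then flush the HWNAT cache so traffic falls back to the CPU
 * path until the entries are rebuilt.
 */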
void foe_clear_all_bind_entries(struct net_device *dev)
{
	int i, hash_index;
	struct foe_entry *entry;

	if (!IS_LAN_GRP(dev) && !IS_WAN(dev) &&
	    !find_extif_from_devname(dev->name) &&
	    !dev->netdev_ops->ndo_flow_offload_check)
		return;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_ONLY_FWD_CPU);

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND) {
				entry->ipv4_hnapt.udib1.state = INVALID;
				entry->ipv4_hnapt.udib1.time_stamp =
					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
			}
		}
	}

	/* clear HWNAT cache */
	hnat_cache_ebl(1);

	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
}

static void gmac_ppe_fwd_enable(struct net_device *dev)
{
	if (IS_LAN(dev) || IS_GMAC1_MODE)
		set_gmac_ppe_fwd(NR_GMAC1_PORT, 1);
	else if (IS_WAN(dev))
		set_gmac_ppe_fwd(NR_GMAC2_PORT, 1);
	else if (IS_LAN2(dev))
		set_gmac_ppe_fwd(NR_GMAC3_PORT, 1);
}

int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
			    void *ptr)
{
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		gmac_ppe_fwd_enable(dev);

		extif_set_dev(dev);

		break;
	case NETDEV_GOING_DOWN:
		if (!get_wifi_hook_if_index_from_dev(dev))
			extif_put_dev(dev);

		foe_clear_all_bind_entries(dev);

		break;
	case NETDEV_UNREGISTER:
		if (hnat_priv->g_ppdev == dev) {
			hnat_priv->g_ppdev = NULL;
			dev_put(dev);
		}
		if (hnat_priv->g_wandev == dev) {
			hnat_priv->g_wandev = NULL;
			dev_put(dev);
		}

		break;
	case NETDEV_REGISTER:
		if (IS_PPD(dev) && !hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
		if (IS_WAN(dev) && !hnat_priv->g_wandev)
			hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

		break;
	case MTK_FE_RESET_NAT_DONE:
		pr_info("[%s] HNAT driver starts to do warm init !\n", __func__);
		hnat_warm_init();
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

void foe_clear_entry(struct neighbour *neigh)
{
	u32 *daddr = (u32 *)neigh->primary_key;
	unsigned char h_dest[ETH_ALEN];
	struct foe_entry *entry;
	int i, hash_index;
	u32 dip;

	dip = (u32)(*daddr);

	for (i = 0; i < CFG_PPE_NUM; i++) {
		if (!hnat_priv->foe_table_cpu[i])
			continue;

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND &&
			    entry->ipv4_hnapt.new_dip == ntohl(dip)) {
				*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
				*((u16 *)&h_dest[4]) =
					swab16(entry->ipv4_hnapt.dmac_lo);
				if (strncmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
					pr_info("%s: state=%d\n", __func__,
						neigh->nud_state);
					cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
						     SMA, SMA_ONLY_FWD_CPU);

					entry->ipv4_hnapt.udib1.state = INVALID;
					entry->ipv4_hnapt.udib1.time_stamp =
						readl((hnat_priv->fe_base + 0x0010)) & 0xFF;

					/* clear HWNAT cache */
					hnat_cache_ebl(1);

					mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
						  jiffies + 3 * HZ);

					pr_info("Delete old entry: dip =%pI4\n", &dip);
					pr_info("Old mac= %pM\n", h_dest);
					pr_info("New mac= %pM\n", neigh->ha);
				}
			}
		}
	}
}

int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = NULL;
	struct neighbour *neigh = NULL;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		neigh = ptr;
		dev = neigh->dev;
		if (dev)
			foe_clear_entry(neigh);
		break;
	}

	return NOTIFY_DONE;
}

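/* Re-insert the cached outer IPv6 header in front of an IPv4 packet for the
 * MAP-E ping-pong path; returns -1 if the skb has no headroom or cannot be
 * written to.
 */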
unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
{
	struct ethhdr *eth = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr *iph = NULL;

	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		return -1;
	}

	/* point to L3 */
	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);

	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	eth->h_proto = htons(ETH_P_IPV6);
	skb->protocol = htons(ETH_P_IPV6);

	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
	ip6h = (struct ipv6hdr *)(skb->data);
	ip6h->payload_len = iph->tot_len; /* may differ from the IPv4 total length */

	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
	return 0;
}

static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
				struct ethhdr *eth)
{
	skb->pkt_type = PACKET_HOST;
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
}

unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
			       const char *func)
{
	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
		u16 vlan_id = 0;
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		set_to_ppe(skb);

		vlan_id = skb_vlan_tag_get_id(skb);
		if (vlan_id) {
			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
			if (!skb)
				return -1;
		}

		/* set where we come from */
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
		trace_printk(
			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
			in->name, hnat_priv->g_ppdev->name);
		skb->dev = hnat_priv->g_ppdev;
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__, func);
		return 0;
	}

	trace_printk("%s: called from %s fail\n", __func__, func);
	return -1;
}

unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct net_device *dev;
	struct foe_entry *entry;

	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
		     ntohs(skb->vlan_proto), skb->vlan_tci);

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return -1;

	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);

	if (dev) {
		/* set where to go */
		skb->dev = dev;
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;

		if (ntohs(eth->h_proto) == ETH_P_8021Q) {
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				return -1;
		}

		if (IS_BOND_MODE &&
		    (((hnat_priv->data->version == MTK_HNAT_V2 ||
		       hnat_priv->data->version == MTK_HNAT_V3) &&
		      (skb_hnat_entry(skb) != 0x7fff)) ||
		     ((hnat_priv->data->version != MTK_HNAT_V2 &&
		       hnat_priv->data->version != MTK_HNAT_V3) &&
		      (skb_hnat_entry(skb) != 0x3fff))))
			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);

		set_from_extge(skb);
		fix_skb_packet_type(skb, skb->dev, eth);
		netif_rx(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		/* MapE WAN --> LAN/WLAN PingPong. */
		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
		if (mape_toggle && dev) {
			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = dev;
				set_from_mape(skb);
				skb->vlan_proto = 0;
				skb->vlan_tci = 0;
				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
				entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
				entry->bfib1.pkt_type = IPV4_HNAPT;
				netif_rx(skb);
				return 0;
			}
		}
		trace_printk("%s: called from %s fail\n", __func__, func);
		return -1;
	}
}

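/* Forward a PPE-tagged packet received from GMAC back out through the
 * external (act_dp) interface recorded in its FOE entry; if that device is
 * gone, the bound entry is invalidated instead.
 */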
unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
{
	/* set where to go */
	u8 index;
	struct foe_entry *entry;
	struct net_device *dev;

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return -1;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	if (IS_IPV4_GRP(entry))
		index = entry->ipv4_hnapt.act_dp;
	else
		index = entry->ipv6_5t_route.act_dp;

	dev = get_dev_from_index(index);
	if (!dev) {
		trace_printk("%s: called from %s. Get wifi interface fail\n",
			     __func__, func);
		return 0;
	}

	skb->dev = dev;

	if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NF_ACCEPT;

		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
			return NF_ACCEPT;

		skb_pull_rcsum(skb, VLAN_HLEN);

		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
			2 * ETH_ALEN);
	}

	if (skb->dev) {
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		if (mape_toggle) {
			/* Add ipv6 header mape for lan/wlan --> wan */
			dev = get_wandev_from_index(index);
			if (dev) {
				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
					skb_set_network_header(skb, 0);
					skb_push(skb, ETH_HLEN);
					skb_set_mac_header(skb, 0);
					skb->dev = dev;
					dev_queue_xmit(skb);
					return 0;
				}
				trace_printk("%s: called from %s fail[MapE]\n", __func__,
					     func);
				return -1;
			}
		}
	}
	/* if the external device is down, invalidate the related PPE entry */
	if (entry_hnat_is_bound(entry)) {
		entry->bfib1.state = INVALID;
		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.act_dp = 0;
		else
			entry->ipv6_5t_route.act_dp = 0;

		/* clear HWNAT cache */
		hnat_cache_ebl(1);
	}
	trace_printk("%s: called from %s fail, index=%x\n", __func__,
		     func, index);
	return -1;
}

static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
			      const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
			       const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

static inline void hnat_set_iif(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	if (IS_WHNAT(state->in) && FROM_WED(skb)) {
		return;
	} else if (IS_LAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
	} else if (IS_LAN2(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN2;
	} else if (IS_PPD(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
	} else if (IS_EXT(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
	} else if (IS_WAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
	} else if (!IS_BR(state->in)) {
		if (state->in->netdev_ops->ndo_flow_offload_check) {
			skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
		} else {
			skb_hnat_iface(skb) = FOE_INVALID;

			if (is_magic_tag_valid(skb) &&
			    IS_SPACE_AVAILABLE_HEAD(skb))
				memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
		}
	}
}

static inline void hnat_set_alg(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	skb_hnat_alg(skb) = val;
}

static inline void hnat_set_head_frags(const struct nf_hook_state *state,
				       struct sk_buff *head_skb, int val,
				       void (*fn)(const struct nf_hook_state *state,
						  struct sk_buff *skb, int val))
{
	struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;

	fn(state, head_skb, val);
	while (segs) {
		fn(state, segs, val);
		segs = segs->next;
	}
}

static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
{
	entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
	entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
	entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
}

unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
				   const char *func)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct iphdr _iphdr;
	struct iphdr *iph;
	struct ethhdr *eth;

	/* WAN -> LAN/WLAN MapE. */
	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
		if (unlikely(!iph))
			return -1;

		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			break;
		default:
			return -1;
		}
		mape_w2l_v6h = *ip6h;

		/* Remove ipv6 header. */
		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
		       skb->data - ETH_HLEN, ETH_HLEN);
		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
		skb_set_mac_header(skb, 0);
		skb_set_network_header(skb, ETH_HLEN);
		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));

		eth = eth_hdr(skb);
		eth->h_proto = htons(ETH_P_IP);
		set_to_ppe(skb);

		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));

		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		skb->dev = hnat_priv->g_ppdev;
		skb->protocol = htons(ETH_P_IP);

		dev_queue_xmit(skb);

		return 0;
	}
	return -1;
}

void mtk_464xlat_pre_process(struct sk_buff *skb)
{
	struct foe_entry *foe;

	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return;

	foe = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (foe->bfib1.state != BIND &&
	    skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH)
		memcpy(&headroom[skb_hnat_entry(skb)], skb->head,
		       sizeof(struct hnat_desc));

	if (foe->bfib1.state == BIND)
		memset(&headroom[skb_hnat_entry(skb)], 0,
		       sizeof(struct hnat_desc));
}

static unsigned int is_ppe_support_type(struct sk_buff *skb)
{
	struct ethhdr *eth = NULL;
	struct iphdr *iph = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr _iphdr;

	eth = eth_hdr(skb);
	if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) ||
	    is_broadcast_ether_addr(eth->h_dest))
		return 0;

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);

		/* do not accelerate non tcp/udp traffic */
		if ((iph->protocol == IPPROTO_TCP) ||
		    (iph->protocol == IPPROTO_UDP) ||
		    (iph->protocol == IPPROTO_IPV6)) {
			return 1;
		}

		break;
	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);

		if ((ip6h->nexthdr == NEXTHDR_TCP) ||
		    (ip6h->nexthdr == NEXTHDR_UDP)) {
			return 1;
		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
			iph = skb_header_pointer(skb, IPV6_HDR_LEN,
						 sizeof(_iphdr), &_iphdr);
			if (unlikely(!iph))
				return 0;

			if ((iph->protocol == IPPROTO_TCP) ||
			    (iph->protocol == IPPROTO_UDP)) {
				return 1;
			}

		}

		break;
	case ETH_P_8021Q:
		return 1;
	}

	return 0;
}

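/* Netfilter pre-routing and bridge local-in hooks: per packet, either hand
 * it to the fast ext<->GE paths above (NF_STOLEN), let it continue through
 * the normal stack (NF_ACCEPT), or drop it on a sanity failure (NF_DROP).
 */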
static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!skb)
		goto drop;

	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx, step 1, learning stage & bound stage */
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	/* packets from ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE need remove ipv6 header and pingpong. */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	if (is_from_mape(skb))
		clr_from_extge(skb);
#endif
	if (xlat_toggle)
		mtk_464xlat_pre_process(skb);

	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!skb)
		goto drop;

	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx, step 1, learning stage & bound stage */
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	/* packets from ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}
	if (xlat_toggle)
		mtk_464xlat_pre_process(skb);

	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct vlan_ethhdr *veth;

	if (!skb)
		goto drop;

	if (IS_HQOS_MODE && hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* packets from external devices -> xxx, step 1, learning stage & bound stage */
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx, step 2, learning stage */
		if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
		    (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets from ge -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE need remove ipv6 header and pingpong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}
#endif
	return NF_ACCEPT;
drop:
	if (skb)
		printk_ratelimited(KERN_WARNING
				   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
				   "sport=0x%x, reason=0x%x, alg=0x%x)\n",
				   __func__, state->in->name, skb_hnat_iface(skb),
				   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
				   skb_hnat_sport(skb), skb_hnat_reason(skb),
				   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get an all-zero ethernet address? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/* copy ether type for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}

static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	u32 nexthop;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = (__force struct net_device *)out;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
			   &ip_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get an all-zero ethernet address? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	return 0;
}

static u16 ppe_get_chkbase(struct iphdr *iph)
{
	u16 org_chksum = ntohs(iph->check);
	u16 org_tot_len = ntohs(iph->tot_len);
	u16 org_id = ntohs(iph->id);
	u16 chksum_tmp, tot_len_tmp, id_tmp;
	u32 tmp = 0;
	u16 chksum_base = 0;

	chksum_tmp = ~(org_chksum);
	tot_len_tmp = ~(org_tot_len);
	id_tmp = ~(org_id);
	tmp = chksum_tmp + tot_len_tmp + id_tmp;
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	chksum_base = tmp & 0xFFFF;

	return chksum_base;
}

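/* ppe_fill_L2_info()/ppe_fill_info_blk() copy the Ethernet addresses, PPPoE
 * session id, VLAN layering, cache/timestamp and multicast/port_ag bits into
 * the FOE entry according to its packet type; both take and return the entry
 * by value.
 */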
struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
				  struct flow_offload_hw_path *hw_path)
{
	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv6_5t_route.smac_lo =
			swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
		break;
	}
	return entry;
}

struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
				   struct flow_offload_hw_path *hw_path)
{
	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
	entry.bfib1.cah = 1;
	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V2 ||
				  hnat_priv->data->version == MTK_HNAT_V3) ?
		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);

	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv4_hnapt.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V1_3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv4_hnapt.iblk2.mcast = 0;
		}

		entry.ipv4_hnapt.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V2 ||
			 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv6_5t_route.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V1_3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv6_5t_route.iblk2.mcast = 0;
		}

		entry.ipv6_5t_route.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V2 ||
			 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
		break;
	}
	return entry;
}

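/* Build a candidate FOE entry from the skb and its flow_offload hw path:
 * L3/L4 fields per packet type, L2 header info, and the destination
 * GMAC/queue and QoS fields for the egress device.
 */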
static unsigned int skb_to_hnat_info(struct sk_buff *skb,
				     const struct net_device *dev,
				     struct foe_entry *foe,
				     struct flow_offload_hw_path *hw_path)
{
	struct foe_entry entry = { 0 };
	int whnat = IS_WHNAT(dev);
	struct ethhdr *eth;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	u32 gmac = NR_DISCARD;
	int udp = 0;
	u32 qid = 0;
	u32 port_id = 0;
	int mape = 0;

	ct = nf_ct_get(skb, &ctinfo);

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
		/* point to ethernet header for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	else
		eth = eth_hdr(skb);

	/* do not bind multicast if PPE mcast is not enabled */
	if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest))
		return 0;

	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packet type state */
	entry.bfib1.state = foe->udib1.state;

#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
	entry.bfib1.sp = foe->udib1.sp;
#endif

	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);
		switch (iph->protocol) {
		case IPPROTO_UDP:
			udp = 1;
			/* fallthrough */
		case IPPROTO_TCP:
			entry.ipv4_hnapt.etype = htons(ETH_P_IP);

			/* DS-Lite WAN->LAN */
			if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
			    entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
				if (entry.bfib1.pkt_type == IPV4_MAP_E) {
					pptr = skb_header_pointer(skb,
								  iph->ihl * 4,
								  sizeof(_ports),
								  &_ports);
					if (unlikely(!pptr))
						return -1;

					entry.ipv4_mape.new_sip =
						ntohl(iph->saddr);
					entry.ipv4_mape.new_dip =
						ntohl(iph->daddr);
					entry.ipv4_mape.new_sport =
						ntohs(pptr->src);
					entry.ipv4_mape.new_dport =
						ntohs(pptr->dst);
				}
#endif

				entry.ipv4_dslite.tunnel_sipv6_0 =
					foe->ipv4_dslite.tunnel_sipv6_0;
				entry.ipv4_dslite.tunnel_sipv6_1 =
					foe->ipv4_dslite.tunnel_sipv6_1;
				entry.ipv4_dslite.tunnel_sipv6_2 =
					foe->ipv4_dslite.tunnel_sipv6_2;
				entry.ipv4_dslite.tunnel_sipv6_3 =
					foe->ipv4_dslite.tunnel_sipv6_3;

				entry.ipv4_dslite.tunnel_dipv6_0 =
					foe->ipv4_dslite.tunnel_dipv6_0;
				entry.ipv4_dslite.tunnel_dipv6_1 =
					foe->ipv4_dslite.tunnel_dipv6_1;
				entry.ipv4_dslite.tunnel_dipv6_2 =
					foe->ipv4_dslite.tunnel_dipv6_2;
				entry.ipv4_dslite.tunnel_dipv6_3 =
					foe->ipv4_dslite.tunnel_dipv6_3;

				entry.ipv4_dslite.bfib1.rmt = 1;
				entry.ipv4_dslite.iblk2.dscp = iph->tos;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;

			} else {
				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				if (skb_vlan_tag_present(skb)) {
					entry.bfib1.vlan_layer += 1;

					if (entry.ipv4_hnapt.vlan1)
						entry.ipv4_hnapt.vlan2 =
							skb->vlan_tci;
					else
						entry.ipv4_hnapt.vlan1 =
							skb->vlan_tci;
				}

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
			}

			entry.ipv4_hnapt.bfib1.udp = udp;
			if (IS_IPV4_HNAPT(foe)) {
				pptr = skb_header_pointer(skb, iph->ihl * 4,
							  sizeof(_ports),
							  &_ports);
				if (unlikely(!pptr))
					return -1;

				entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
			}

			break;

		default:
			return -1;
		}
		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, iph, skb->len,
			skb->data_len);
		break;

	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);
		switch (ip6h->nexthdr) {
		case NEXTHDR_UDP:
			udp = 1;
			/* fallthrough */
		case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
			entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);

			entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;

			if (skb_vlan_tag_present(skb)) {
				entry.bfib1.vlan_layer += 1;

				if (entry.ipv6_5t_route.vlan1)
					entry.ipv6_5t_route.vlan2 =
						skb->vlan_tci;
				else
					entry.ipv6_5t_route.vlan1 =
						skb->vlan_tci;
			}

			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_5t_route.iblk2.mibf = 1;
			entry.ipv6_5t_route.bfib1.udp = udp;

			if (IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.bfib1.rmt = 1;
				entry.ipv6_6rd.tunnel_sipv4 =
					foe->ipv6_6rd.tunnel_sipv4;
				entry.ipv6_6rd.tunnel_dipv4 =
					foe->ipv6_6rd.tunnel_dipv4;
			}

			entry.ipv6_3t_route.ipv6_sip0 =
				foe->ipv6_3t_route.ipv6_sip0;
			entry.ipv6_3t_route.ipv6_sip1 =
				foe->ipv6_3t_route.ipv6_sip1;
			entry.ipv6_3t_route.ipv6_sip2 =
				foe->ipv6_3t_route.ipv6_sip2;
			entry.ipv6_3t_route.ipv6_sip3 =
				foe->ipv6_3t_route.ipv6_sip3;

			entry.ipv6_3t_route.ipv6_dip0 =
				foe->ipv6_3t_route.ipv6_dip0;
			entry.ipv6_3t_route.ipv6_dip1 =
				foe->ipv6_3t_route.ipv6_dip1;
			entry.ipv6_3t_route.ipv6_dip2 =
				foe->ipv6_3t_route.ipv6_dip2;
			entry.ipv6_3t_route.ipv6_dip3 =
				foe->ipv6_3t_route.ipv6_dip3;

			if (IS_IPV6_3T_ROUTE(foe)) {
				entry.ipv6_3t_route.prot =
					foe->ipv6_3t_route.prot;
				entry.ipv6_3t_route.hph =
					foe->ipv6_3t_route.hph;
			}

			if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.sport =
					foe->ipv6_5t_route.sport;
				entry.ipv6_5t_route.dport =
					foe->ipv6_5t_route.dport;
			}

#if defined(CONFIG_MEDIATEK_NETSYS_V3)
			if (ct && (ct->status & IPS_SRC_NAT)) {
				entry.bfib1.pkt_type = IPV6_HNAPT;

				if (IS_WAN(dev) || IS_DSA_WAN(dev)) {
					entry.ipv6_hnapt.eg_ipv6_dir =
						IPV6_SNAT;
					entry.ipv6_hnapt.new_ipv6_ip0 =
						ntohl(ip6h->saddr.s6_addr32[0]);
					entry.ipv6_hnapt.new_ipv6_ip1 =
						ntohl(ip6h->saddr.s6_addr32[1]);
					entry.ipv6_hnapt.new_ipv6_ip2 =
						ntohl(ip6h->saddr.s6_addr32[2]);
					entry.ipv6_hnapt.new_ipv6_ip3 =
						ntohl(ip6h->saddr.s6_addr32[3]);
				} else {
					entry.ipv6_hnapt.eg_ipv6_dir =
						IPV6_DNAT;
					entry.ipv6_hnapt.new_ipv6_ip0 =
						ntohl(ip6h->daddr.s6_addr32[0]);
					entry.ipv6_hnapt.new_ipv6_ip1 =
						ntohl(ip6h->daddr.s6_addr32[1]);
					entry.ipv6_hnapt.new_ipv6_ip2 =
						ntohl(ip6h->daddr.s6_addr32[2]);
					entry.ipv6_hnapt.new_ipv6_ip3 =
						ntohl(ip6h->daddr.s6_addr32[3]);
				}

				pptr = skb_header_pointer(skb, IPV6_HDR_LEN,
							  sizeof(_ports),
							  &_ports);
				if (unlikely(!pptr))
					return -1;

				entry.ipv6_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv6_hnapt.new_dport = ntohs(pptr->dst);
			}
#endif

			entry.ipv6_5t_route.iblk2.dscp =
				(ip6h->priority << 4 |
				 (ip6h->flow_lbl[0] >> 4));
			break;

		case NEXTHDR_IPIP:
			if ((!mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_DSLITE) ||
			    (mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_MAP_E)) {
				/* DS-Lite LAN->WAN */
				entry.ipv4_dslite.bfib1.udp =
					foe->ipv4_dslite.bfib1.udp;
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

				entry.ipv4_dslite.tunnel_sipv6_0 =
					ntohl(ip6h->saddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_sipv6_1 =
					ntohl(ip6h->saddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_sipv6_2 =
					ntohl(ip6h->saddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_sipv6_3 =
					ntohl(ip6h->saddr.s6_addr32[3]);

				entry.ipv4_dslite.tunnel_dipv6_0 =
					ntohl(ip6h->daddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_dipv6_1 =
					ntohl(ip6h->daddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_dipv6_2 =
					ntohl(ip6h->daddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_dipv6_3 =
					ntohl(ip6h->daddr.s6_addr32[3]);

				ppe_fill_flow_lbl(&entry, ip6h);

				entry.ipv4_dslite.priority = ip6h->priority;
				entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;
				/* Map-E LAN->WAN record inner IPv4 header info. */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
				if (mape_toggle) {
					entry.ipv4_dslite.iblk2.dscp = foe->ipv4_dslite.iblk2.dscp;
					entry.ipv4_mape.new_sip = foe->ipv4_mape.new_sip;
					entry.ipv4_mape.new_dip = foe->ipv4_mape.new_dip;
					entry.ipv4_mape.new_sport = foe->ipv4_mape.new_sport;
					entry.ipv4_mape.new_dport = foe->ipv4_mape.new_dport;
				}
#endif
			} else if (mape_toggle &&
				   entry.bfib1.pkt_type == IPV4_HNAPT) {
				/* MapE LAN -> WAN */
				mape = 1;
				entry.ipv4_hnapt.iblk2.dscp =
					foe->ipv4_hnapt.iblk2.dscp;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				if (IS_GMAC1_MODE)
					entry.ipv4_hnapt.vlan1 = 1;
				else
					entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip =
					foe->ipv4_hnapt.new_sip;
				entry.ipv4_hnapt.new_dip =
					foe->ipv4_hnapt.new_dip;
				entry.ipv4_hnapt.etype = htons(ETH_P_IP);

				if (IS_HQOS_MODE) {
					entry.ipv4_hnapt.iblk2.qid =
						(hnat_priv->data->version ==
						 MTK_HNAT_V2 ||
						 hnat_priv->data->version ==
						 MTK_HNAT_V3) ?
						skb->mark & 0x7f : skb->mark & 0xf;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
					if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
					    (IS_HQOS_DL_MODE &&
					     IS_LAN_GRP(dev)) ||
					    (IS_PPPQ_MODE &&
					     IS_PPPQ_PATH(dev, skb)))
						entry.ipv4_hnapt.tport_id = 1;
					else
						entry.ipv4_hnapt.tport_id = 0;
#else
					entry.ipv4_hnapt.iblk2.fqos = 1;
#endif
				}

				entry.ipv4_hnapt.bfib1.udp =
					foe->ipv4_hnapt.bfib1.udp;

				entry.ipv4_hnapt.new_sport =
					foe->ipv4_hnapt.new_sport;
				entry.ipv4_hnapt.new_dport =
					foe->ipv4_hnapt.new_dport;
				mape_l2w_v6h = *ip6h;
			}
			break;

		default:
			return -1;
		}

		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, ip6h, skb->len,
			skb->data_len);
		break;

	default:
		iph = ip_hdr(skb);
		switch (entry.bfib1.pkt_type) {
		case IPV6_6RD: /* 6RD LAN->WAN */
			entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
			entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
			entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
			entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;

			entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
			entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
			entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
			entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;

			entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
			entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
			entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
			entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
			entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
			entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
			entry.ipv6_6rd.ttl = iph->ttl;
			entry.ipv6_6rd.dscp = iph->tos;
			entry.ipv6_6rd.per_flow_6rd_id = 1;
			entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_6rd.iblk2.mibf = 1;
			break;

		default:
			return -1;
		}
	}

	/* Fill Layer2 Info.*/
	entry = ppe_fill_L2_info(eth, entry, hw_path);

	/* Fill Info Blk*/
	entry = ppe_fill_info_blk(eth, entry, hw_path);

	if (IS_LAN(dev)) {
		if (IS_DSA_LAN(dev))
			port_id = hnat_dsa_fill_stag(dev, &entry, hw_path,
						     ntohs(eth->h_proto),
						     mape);

		if (IS_BOND_MODE)
			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
				NR_GMAC2_PORT : NR_GMAC1_PORT;
		else
			gmac = NR_GMAC1_PORT;
	} else if (IS_LAN2(dev)) {
		gmac = NR_GMAC3_PORT;
	} else if (IS_WAN(dev)) {
		if (IS_DSA_WAN(dev))
			port_id = hnat_dsa_fill_stag(dev, &entry, hw_path,
						     ntohs(eth->h_proto),
						     mape);
		if (mape_toggle && mape == 1) {
			gmac = NR_PDMA_PORT;
			/* Set act_dp = wan_dev */
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		} else {
			gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
		}
	} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN_GRP(skb) ||
		   FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
		if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
			entry.bfib1.vpm = 1;
			entry.bfib1.vlan_layer = 1;

			if (FROM_GE_LAN(skb))
				entry.ipv4_hnapt.vlan1 = 1;
			else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
				entry.ipv4_hnapt.vlan1 = 2;
		}

		trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
			     skb_hnat_iface(skb), dev->name);
		/* To CPU then stolen by pre-routing hnat hook of LAN/WAN
		 * Current setting is PDMA RX.
		 */
		gmac = NR_PDMA_PORT;
		if (IS_IPV4_GRP(foe))
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		else
			entry.ipv6_5t_route.act_dp = dev->ifindex;
	} else {
		printk_ratelimited(KERN_WARNING
				   "Unknown case of dp, iif=%x --> %s\n",
				   skb_hnat_iface(skb), dev->name);

		return 0;
	}

	if (IS_HQOS_MODE || skb->mark >= MAX_PPPQ_PORT_NUM)
		qid = skb->mark & (MTK_QDMA_TX_MASK);
	else if (IS_PPPQ_MODE && IS_PPPQ_PATH(dev, skb))
		qid = port_id & MTK_QDMA_TX_MASK;
	else
		qid = 0;

	if (IS_IPV4_GRP(foe)) {
		entry.ipv4_hnapt.iblk2.dp = gmac;
		entry.ipv4_hnapt.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;

		if (qos_toggle) {
			if (hnat_priv->data->version == MTK_HNAT_V2 ||
			    hnat_priv->data->version == MTK_HNAT_V3) {
				entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
			} else {
				/* qid[5:0] = port_mg[1:0] + qid[3:0] */
				entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
				if (hnat_priv->data->version != MTK_HNAT_V1_1)
					entry.ipv4_hnapt.iblk2.port_mg |=
						((qid >> 4) & 0x3);

				if (((IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
				      FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
				     ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
				    (!whnat)) {
					entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
					entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
					entry.bfib1.vlan_layer = 1;
				}
			}

			if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT ||
			    (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
				entry.ipv4_hnapt.iblk2.fqos = 0;
			else
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
				if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
				    (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
				    (IS_PPPQ_MODE &&
				     IS_PPPQ_PATH(dev, skb)))
					entry.ipv4_hnapt.tport_id = 1;
				else
					entry.ipv4_hnapt.tport_id = 0;
#else
				entry.ipv4_hnapt.iblk2.fqos =
					(!IS_PPPQ_MODE ||
					 (IS_PPPQ_MODE &&
					  IS_PPPQ_PATH(dev, skb)));
#endif
		} else {
			entry.ipv4_hnapt.iblk2.fqos = 0;
		}
	} else {
		entry.ipv6_5t_route.iblk2.dp = gmac;
		entry.ipv6_5t_route.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;

		if (qos_toggle) {
			if (hnat_priv->data->version == MTK_HNAT_V2 ||
			    hnat_priv->data->version == MTK_HNAT_V3) {
				entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
			} else {
				/* qid[5:0] = port_mg[1:0] + qid[3:0] */
				entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
				if (hnat_priv->data->version != MTK_HNAT_V1_1)
					entry.ipv6_5t_route.iblk2.port_mg |=
						((qid >> 4) & 0x3);

				if (IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
				    FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
				    (!whnat)) {
					entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
					entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
					entry.bfib1.vlan_layer = 1;
				}
			}

			if (FROM_EXT(skb) ||
1706 (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
developeraf07fad2021-11-19 17:53:42 +08001707 entry.ipv6_5t_route.iblk2.fqos = 0;
1708 else
developerd35bbcc2022-09-28 22:46:01 +08001709#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer934756a2022-11-18 14:51:34 +08001710 if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
developer493adc32022-11-29 22:34:18 +08001711 (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
developer934756a2022-11-18 14:51:34 +08001712 (IS_PPPQ_MODE &&
1713 IS_PPPQ_PATH(dev, skb)))
developer47545a32022-11-15 16:06:58 +08001714 entry.ipv6_5t_route.tport_id = 1;
1715 else
1716 entry.ipv6_5t_route.tport_id = 0;
developerd35bbcc2022-09-28 22:46:01 +08001717#else
developer399ec072022-06-24 16:07:41 +08001718 entry.ipv6_5t_route.iblk2.fqos =
developer934756a2022-11-18 14:51:34 +08001719 (!IS_PPPQ_MODE ||
1720 (IS_PPPQ_MODE &&
1721 IS_PPPQ_PATH(dev, skb)));
developerd35bbcc2022-09-28 22:46:01 +08001722#endif
developeraf07fad2021-11-19 17:53:42 +08001723 } else {
developerfd40db22021-04-29 10:08:25 +08001724 entry.ipv6_5t_route.iblk2.fqos = 0;
developeraf07fad2021-11-19 17:53:42 +08001725 }
developerfd40db22021-04-29 10:08:25 +08001726 }
1727
developer60e60962021-06-15 21:05:07 +08001728 /* The INFO2.port_mg and 2nd VLAN ID fields of the PPE entry are redefined
1729 * by the Wi-Fi whnat engine. These fields and INFO2.dp will be updated and
1730 * the entry will be set to the BIND state in mtk_sw_nat_hook_tx().
1731 */
developer7b36dca2022-05-19 18:29:10 +08001732 if (!whnat) {
1733 entry.bfib1.ttl = 1;
developer60e60962021-06-15 21:05:07 +08001734 entry.bfib1.state = BIND;
developer7b36dca2022-05-19 18:29:10 +08001735 }
developer60e60962021-06-15 21:05:07 +08001736
developerbc552cc2022-03-15 16:19:27 +08001737 wmb();
developerfd40db22021-04-29 10:08:25 +08001738 memcpy(foe, &entry, sizeof(entry));
1739 /* Reset the statistics for this entry */
developer577ad2f2022-11-28 10:33:36 +08001740 if (hnat_priv->data->per_flow_accounting &&
1741 skb_hnat_entry(skb) < hnat_priv->foe_etry_num &&
1742 skb_hnat_ppe(skb) < CFG_PPE_NUM)
developer471f6562021-05-10 20:48:34 +08001743 memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
1744 0, sizeof(struct mib_entry));
developerfd40db22021-04-29 10:08:25 +08001745
developerfdfe1572021-09-13 16:56:33 +08001746 skb_hnat_filled(skb) = HNAT_INFO_FILLED;
developerfd40db22021-04-29 10:08:25 +08001747
1748 return 0;
1749}
1750
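/* TX-side binding helper: for an skb whose PPE lookup reason is
 * HIT_UNBIND_RATE_REACH and whose destination is a WDMA/whnat port, fill in
 * the source MAC, VLAN, Wi-Fi winfo and destination-port fields of the FOE
 * entry and switch it to the BIND state. Already-bound entries are skipped.
 */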
1751int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
1752{
1753 struct foe_entry *entry;
1754 struct ethhdr *eth;
developerbc552cc2022-03-15 16:19:27 +08001755 struct hnat_bind_info_blk bfib1_tx;
developerfd40db22021-04-29 10:08:25 +08001756
developerfdfe1572021-09-13 16:56:33 +08001757 if (skb_hnat_alg(skb) || !is_hnat_info_filled(skb) ||
1758 !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
developerfd40db22021-04-29 10:08:25 +08001759 return NF_ACCEPT;
1760
1761 trace_printk(
1762 "[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
1763 __func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
1764 skb_hnat_wdma_id(skb), skb_hnat_bss_id(skb),
1765 skb_hnat_wc_id(skb), skb_hnat_rx_id(skb));
1766
developer99506e52021-06-30 22:03:02 +08001767 if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) &&
1768 (gmac_no != NR_WHNAT_WDMA_PORT))
1769 return NF_ACCEPT;
1770
developerc0419aa2022-12-07 15:56:36 +08001771 if (unlikely(!skb_mac_header_was_set(skb)))
1772 return NF_ACCEPT;
1773
developerfd40db22021-04-29 10:08:25 +08001774 if (!skb_hnat_is_hashed(skb))
1775 return NF_ACCEPT;
1776
developer955a6f62021-07-26 10:54:39 +08001777 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
1778 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
1779 return NF_ACCEPT;
1780
developer471f6562021-05-10 20:48:34 +08001781 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08001782 if (entry_hnat_is_bound(entry))
1783 return NF_ACCEPT;
1784
1785 if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
1786 return NF_ACCEPT;
1787
1788 eth = eth_hdr(skb);
developerbc552cc2022-03-15 16:19:27 +08001789 memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
developer8116b0a2021-08-23 18:07:20 +08001790
1791 /* Do not bind multicast flows if PPE mcast support is not enabled */
developerfdfe1572021-09-13 16:56:33 +08001792 if (!hnat_priv->data->mcast) {
1793 if (is_multicast_ether_addr(eth->h_dest))
1794 return NF_ACCEPT;
1795
1796 if (IS_IPV4_GRP(entry))
1797 entry->ipv4_hnapt.iblk2.mcast = 0;
1798 else
1799 entry->ipv6_5t_route.iblk2.mcast = 0;
1800 }
developerfd40db22021-04-29 10:08:25 +08001801
1802 /* Some mt_wifi virtual interfaces, such as apcli,
1803 * will change the smac for special purposes.
1804 */
developer5ffc5f12022-10-25 18:51:46 +08001805 switch ((int)bfib1_tx.pkt_type) {
developerfd40db22021-04-29 10:08:25 +08001806 case IPV4_HNAPT:
1807 case IPV4_HNAT:
1808 entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
1809 entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1810 break;
1811 case IPV4_DSLITE:
1812 case IPV4_MAP_E:
1813 case IPV6_6RD:
1814 case IPV6_5T_ROUTE:
1815 case IPV6_3T_ROUTE:
developer5ffc5f12022-10-25 18:51:46 +08001816 case IPV6_HNAPT:
1817 case IPV6_HNAT:
developerfd40db22021-04-29 10:08:25 +08001818 entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
1819 entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1820 break;
1821 }
1822
developer0ff76882021-10-26 10:54:13 +08001823 if (skb->vlan_tci) {
developerbc552cc2022-03-15 16:19:27 +08001824 bfib1_tx.vlan_layer = 1;
1825 bfib1_tx.vpm = 1;
developer0ff76882021-10-26 10:54:13 +08001826 if (IS_IPV4_GRP(entry)) {
1827 entry->ipv4_hnapt.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001828 entry->ipv4_hnapt.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001829 } else if (IS_IPV6_GRP(entry)) {
1830 entry->ipv6_5t_route.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001831 entry->ipv6_5t_route.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001832 }
1833 } else {
developerbc552cc2022-03-15 16:19:27 +08001834 bfib1_tx.vpm = 0;
1835 bfib1_tx.vlan_layer = 0;
developer0ff76882021-10-26 10:54:13 +08001836 }
developer60e60962021-06-15 21:05:07 +08001837
developerfd40db22021-04-29 10:08:25 +08001838 /* MT7622 Wi-Fi hw_nat does not support QoS */
1839 if (IS_IPV4_GRP(entry)) {
1840 entry->ipv4_hnapt.iblk2.fqos = 0;
developer4164cfe2022-12-01 11:27:41 +08001841 if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
developere567ad32021-05-25 17:16:17 +08001842 gmac_no == NR_WHNAT_WDMA_PORT) ||
developer4164cfe2022-12-01 11:27:41 +08001843 ((hnat_priv->data->version == MTK_HNAT_V2 ||
1844 hnat_priv->data->version == MTK_HNAT_V3) &&
developere567ad32021-05-25 17:16:17 +08001845 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001846 entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1847 entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001848#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer47545a32022-11-15 16:06:58 +08001849 entry->ipv4_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developerd35bbcc2022-09-28 22:46:01 +08001850 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1851 entry->ipv4_hnapt.iblk2.winfoi = 1;
1852 entry->ipv4_hnapt.winfo_pao.usr_info =
1853 skb_hnat_usr_info(skb);
1854 entry->ipv4_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1855 entry->ipv4_hnapt.winfo_pao.is_fixedrate =
1856 skb_hnat_is_fixedrate(skb);
1857 entry->ipv4_hnapt.winfo_pao.is_prior =
1858 skb_hnat_is_prior(skb);
1859 entry->ipv4_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1860 entry->ipv4_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1861 entry->ipv4_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
1862#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
developerfd40db22021-04-29 10:08:25 +08001863 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1864 entry->ipv4_hnapt.iblk2.winfoi = 1;
1865#else
1866 entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
1867 entry->ipv4_hnapt.iblk2w.winfoi = 1;
1868 entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1869#endif
1870 } else {
1871 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001872 bfib1_tx.vpm = 1;
1873 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001874
developerd35bbcc2022-09-28 22:46:01 +08001875 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001876 entry->ipv4_hnapt.vlan1 = 1;
1877 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1878 entry->ipv4_hnapt.vlan1 = 2;
1879 }
1880
developer34028fb2022-01-11 13:51:29 +08001881 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001882 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001883 bfib1_tx.vpm = 0;
1884 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001885 entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
1886 entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
1887 entry->ipv4_hnapt.iblk2.fqos = 1;
1888 }
developerfd40db22021-04-29 10:08:25 +08001889 }
1890 entry->ipv4_hnapt.iblk2.dp = gmac_no;
developer5ffc5f12022-10-25 18:51:46 +08001891#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1892 } else if (IS_IPV6_HNAPT(entry) || IS_IPV6_HNAT(entry)) {
1893 entry->ipv6_hnapt.iblk2.dp = gmac_no;
1894 entry->ipv6_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1895 entry->ipv6_hnapt.iblk2.winfoi = 1;
1896
1897 entry->ipv6_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1898 entry->ipv6_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
1899 entry->ipv6_hnapt.winfo_pao.usr_info = skb_hnat_usr_info(skb);
1900 entry->ipv6_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1901 entry->ipv6_hnapt.winfo_pao.is_fixedrate =
1902 skb_hnat_is_fixedrate(skb);
1903 entry->ipv6_hnapt.winfo_pao.is_prior = skb_hnat_is_prior(skb);
1904 entry->ipv6_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1905 entry->ipv6_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1906 entry->ipv6_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
developer47545a32022-11-15 16:06:58 +08001907 entry->ipv6_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developer5ffc5f12022-10-25 18:51:46 +08001908#endif
developerfd40db22021-04-29 10:08:25 +08001909 } else {
1910 entry->ipv6_5t_route.iblk2.fqos = 0;
developer4164cfe2022-12-01 11:27:41 +08001911 if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
developere567ad32021-05-25 17:16:17 +08001912 gmac_no == NR_WHNAT_WDMA_PORT) ||
developer4164cfe2022-12-01 11:27:41 +08001913 ((hnat_priv->data->version == MTK_HNAT_V2 ||
1914 hnat_priv->data->version == MTK_HNAT_V3) &&
developere567ad32021-05-25 17:16:17 +08001915 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001916 entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
1917 entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001918#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer47545a32022-11-15 16:06:58 +08001919 entry->ipv6_5t_route.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developerfd40db22021-04-29 10:08:25 +08001920 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1921 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerd35bbcc2022-09-28 22:46:01 +08001922 entry->ipv6_5t_route.winfo_pao.usr_info =
1923 skb_hnat_usr_info(skb);
1924 entry->ipv6_5t_route.winfo_pao.tid =
1925 skb_hnat_tid(skb);
1926 entry->ipv6_5t_route.winfo_pao.is_fixedrate =
1927 skb_hnat_is_fixedrate(skb);
1928 entry->ipv6_5t_route.winfo_pao.is_prior =
1929 skb_hnat_is_prior(skb);
1930 entry->ipv6_5t_route.winfo_pao.is_sp =
1931 skb_hnat_is_sp(skb);
1932 entry->ipv6_5t_route.winfo_pao.hf =
1933 skb_hnat_hf(skb);
1934 entry->ipv6_5t_route.winfo_pao.amsdu =
1935 skb_hnat_amsdu(skb);
1936#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
1937 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1938 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerfd40db22021-04-29 10:08:25 +08001939#else
1940 entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
1941 entry->ipv6_5t_route.iblk2w.winfoi = 1;
1942 entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1943#endif
1944 } else {
1945 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001946 bfib1_tx.vpm = 1;
1947 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001948
developerd35bbcc2022-09-28 22:46:01 +08001949 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001950 entry->ipv6_5t_route.vlan1 = 1;
1951 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1952 entry->ipv6_5t_route.vlan1 = 2;
1953 }
1954
developer34028fb2022-01-11 13:51:29 +08001955 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001956 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001957 bfib1_tx.vpm = 0;
1958 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001959 entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
1960 entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
1961 entry->ipv6_5t_route.iblk2.fqos = 1;
1962 }
developerfd40db22021-04-29 10:08:25 +08001963 }
1964 entry->ipv6_5t_route.iblk2.dp = gmac_no;
1965 }
1966
developer7b36dca2022-05-19 18:29:10 +08001967 bfib1_tx.ttl = 1;
developerbc552cc2022-03-15 16:19:27 +08001968 bfib1_tx.state = BIND;
1969 wmb();
1970 memcpy(&entry->bfib1, &bfib1_tx, sizeof(bfib1_tx));
developerfd40db22021-04-29 10:08:25 +08001971
1972 return NF_ACCEPT;
1973}
1974
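/* RX-side hook for packets coming back from WED: reset the HNAT tags in the
 * skb headroom and record the WDMA source port so later hooks can learn the
 * flow from the correct interface.
 */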
1975int mtk_sw_nat_hook_rx(struct sk_buff *skb)
1976{
developer99506e52021-06-30 22:03:02 +08001977 if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
1978 skb_hnat_magic_tag(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001979 return NF_ACCEPT;
developer99506e52021-06-30 22:03:02 +08001980 }
developerfd40db22021-04-29 10:08:25 +08001981
1982 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001983 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001984 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1985
1986 if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
1987 skb_hnat_sport(skb) = NR_WDMA0_PORT;
1988 else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
1989 skb_hnat_sport(skb) = NR_WDMA1_PORT;
1990
1991 return NF_ACCEPT;
1992}
1993
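/* Register a Wi-Fi (or other external) net_device with HNAT: the device is
 * added to the wifi_hook_if[] table and, if it is not already known by name,
 * a new extdev_entry is allocated for it.
 */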
1994void mtk_ppe_dev_register_hook(struct net_device *dev)
1995{
1996 int i, number = 0;
1997 struct extdev_entry *ext_entry;
1998
developerfd40db22021-04-29 10:08:25 +08001999 for (i = 1; i < MAX_IF_NUM; i++) {
2000 if (hnat_priv->wifi_hook_if[i] == dev) {
2001 pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
2002 __func__, dev->name, i);
2003 return;
2004 }
developera7e6c242022-12-05 13:52:40 +08002005 }
2006
2007 for (i = 1; i < MAX_IF_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08002008 if (!hnat_priv->wifi_hook_if[i]) {
2009 if (find_extif_from_devname(dev->name)) {
2010 extif_set_dev(dev);
2011 goto add_wifi_hook_if;
2012 }
2013
2014 number = get_ext_device_number();
2015 if (number >= MAX_EXT_DEVS) {
2016 pr_info("%s : extdev array is full. %s is not registered\n",
2017 __func__, dev->name);
2018 return;
2019 }
2020
2021 ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
2022 if (!ext_entry)
2023 return;
2024
developer4c32b7a2021-11-13 16:46:43 +08002025 strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
developerfd40db22021-04-29 10:08:25 +08002026 dev_hold(dev);
2027 ext_entry->dev = dev;
2028 ext_if_add(ext_entry);
2029
2030add_wifi_hook_if:
2031 dev_hold(dev);
2032 hnat_priv->wifi_hook_if[i] = dev;
2033
2034 break;
2035 }
2036 }
2037 pr_info("%s : interface %s registered (%d)\n", __func__, dev->name, i);
2038}
2039
2040void mtk_ppe_dev_unregister_hook(struct net_device *dev)
2041{
2042 int i;
2043
2044 for (i = 1; i < MAX_IF_NUM; i++) {
2045 if (hnat_priv->wifi_hook_if[i] == dev) {
2046 hnat_priv->wifi_hook_if[i] = NULL;
2047 dev_put(dev);
2048
2049 break;
2050 }
2051 }
2052
2053 extif_put_dev(dev);
2054 pr_info("%s : interface %s set null (%d)\n", __func__, dev->name, i);
2055}
2056
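/* Return 1 if this skb may be accelerated. Flows that are part of an xfrm
 * (IPsec) transform or that use a conntrack helper (ALG) must stay in the
 * software path, so 0 is returned for them.
 */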
2057static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
2058{
2059 struct dst_entry *dst;
2060 struct nf_conn *ct;
2061 enum ip_conntrack_info ctinfo;
2062 const struct nf_conn_help *help;
2063
2064 /* Do not accelerate the 1st round of an xfrm flow; the 2nd round of the
2065 * xfrm flow comes from local_out, which is also filtered in the sanity check.
2066 */
2067 dst = skb_dst(skb);
2068 if (dst && dst_xfrm(dst))
2069 return 0;
2070
2071 ct = nf_ct_get(skb, &ctinfo);
2072 if (!ct)
2073 return 1;
2074
2075 /* rcu_read_lock()ed by nf_hook_slow */
2076 help = nfct_help(ct);
2077 if (help && rcu_dereference(help->helper))
2078 return 0;
2079
2080 return 1;
2081}
2082
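/* Keepalive-time DSCP check: if the DSCP carried by the packet no longer
 * matches the one cached in the FOE entry, clear the entry and refresh the
 * PPE cache (hnat_cache_ebl) so the flow is re-learned with the new marking.
 */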
developer6f4a0c72021-10-19 10:04:22 +08002083static void mtk_hnat_dscp_update(struct sk_buff *skb, struct foe_entry *entry)
2084{
2085 struct iphdr *iph;
2086 struct ethhdr *eth;
2087 struct ipv6hdr *ip6h;
2088 bool flag = false;
2089
2090 eth = eth_hdr(skb);
2091 switch (ntohs(eth->h_proto)) {
2092 case ETH_P_IP:
2093 iph = ip_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002094 if (IS_IPV4_GRP(entry) && entry->ipv4_hnapt.iblk2.dscp != iph->tos)
developer6f4a0c72021-10-19 10:04:22 +08002095 flag = true;
2096 break;
2097 case ETH_P_IPV6:
2098 ip6h = ipv6_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002099 if ((IS_IPV6_3T_ROUTE(entry) || IS_IPV6_5T_ROUTE(entry)) &&
2100 (entry->ipv6_5t_route.iblk2.dscp !=
2101 (ip6h->priority << 4 | (ip6h->flow_lbl[0] >> 4))))
developer6f4a0c72021-10-19 10:04:22 +08002102 flag = true;
2103 break;
2104 default:
2105 return;
2106 }
2107
2108 if (flag) {
developer1080dd82022-03-07 19:31:04 +08002109 if (debug_level >= 2)
2110 pr_info("Delete entry idx=%d.\n", skb_hnat_entry(skb));
developer6f4a0c72021-10-19 10:04:22 +08002111 memset(entry, 0, sizeof(struct foe_entry));
2112 hnat_cache_ebl(1);
2113 }
2114}
2115
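/* Sync hardware per-flow statistics into nf_conntrack: fetch the accounting
 * delta for this FOE entry via hnat_get_count() and add it to the conntrack
 * counters for the packet's direction.
 */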
developer30a47682021-11-02 17:06:14 +08002116static void mtk_hnat_nf_update(struct sk_buff *skb)
2117{
2118 struct nf_conn *ct;
2119 struct nf_conn_acct *acct;
2120 struct nf_conn_counter *counter;
2121 enum ip_conntrack_info ctinfo;
2122 struct hnat_accounting diff;
2123
2124 ct = nf_ct_get(skb, &ctinfo);
2125 if (ct) {
2126 if (!hnat_get_count(hnat_priv, skb_hnat_ppe(skb), skb_hnat_entry(skb), &diff))
2127 return;
2128
2129 acct = nf_conn_acct_find(ct);
2130 if (acct) {
2131 counter = acct->counter;
2132 atomic64_add(diff.packets, &counter[CTINFO2DIR(ctinfo)].packets);
2133 atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes);
2134 }
2135 }
developere8b7dfa2023-04-20 10:16:44 +08002136}
2137
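/* 464XLAT helper: look up the next-hop neighbour on the output device and
 * copy its MAC (plus the device's own MAC) into the FOE entry's L2 fields.
 */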
2138int mtk_464xlat_fill_mac(struct foe_entry *entry, struct sk_buff *skb,
2139 const struct net_device *out, bool l2w)
2140{
2141 const struct in6_addr *ipv6_nexthop;
2142 struct dst_entry *dst = skb_dst(skb);
2143 struct neighbour *neigh = NULL;
2144 struct rtable *rt = (struct rtable *)dst;
2145 u32 nexthop;
2146
2147 rcu_read_lock_bh();
2148 if (l2w) {
2149 ipv6_nexthop = rt6_nexthop((struct rt6_info *)dst,
2150 &ipv6_hdr(skb)->daddr);
2151 neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
2152 if (unlikely(!neigh)) {
2153 dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n",
2154 __func__, &ipv6_hdr(skb)->daddr);
2155 rcu_read_unlock_bh();
2156 return -1;
2157 }
2158 } else {
2159 nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
2160 neigh = __ipv4_neigh_lookup_noref(dst->dev, nexthop);
2161 if (unlikely(!neigh)) {
2162 dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n",
2163 __func__, &ip_hdr(skb)->daddr);
2164 rcu_read_unlock_bh();
2165 return -1;
2166 }
2167 }
2168 rcu_read_unlock_bh();
2169
2170 entry->ipv4_dslite.dmac_hi = swab32(*((u32 *)neigh->ha));
2171 entry->ipv4_dslite.dmac_lo = swab16(*((u16 *)&neigh->ha[4]));
2172 entry->ipv4_dslite.smac_hi = swab32(*((u32 *)out->dev_addr));
2173 entry->ipv4_dslite.smac_lo = swab16(*((u16 *)&out->dev_addr[4]));
2174
2175 return 0;
2176}
2177
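/* Compute the PPE hash of a 464XLAT flow by building a temporary FOE entry
 * with the translated 5-tuple and feeding it to hnat_get_ppe_hash().
 */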
2178int mtk_464xlat_get_hash(struct sk_buff *skb, u32 *hash, bool l2w)
2179{
2180 struct in6_addr addr_v6, prefix;
2181 struct ipv6hdr *ip6h;
2182 struct iphdr *iph;
2183 struct tcpudphdr *pptr, _ports;
2184 struct foe_entry tmp;
2185 u32 addr, protoff;
2186
2187 if (l2w) {
2188 ip6h = ipv6_hdr(skb);
2189 if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
2190 return -1;
2191 protoff = IPV6_HDR_LEN;
2192
2193 tmp.bfib1.pkt_type = IPV4_HNAPT;
2194 tmp.ipv4_hnapt.sip = ntohl(ip6h->saddr.s6_addr32[3]);
2195 tmp.ipv4_hnapt.dip = ntohl(addr);
2196 } else {
2197 iph = ip_hdr(skb);
2198 if (mtk_ppe_get_xlat_v6_by_v4(&iph->saddr, &addr_v6, &prefix))
2199 return -1;
2200
2201 protoff = iph->ihl * 4;
2202
2203 tmp.bfib1.pkt_type = IPV6_5T_ROUTE;
2204 tmp.ipv6_5t_route.ipv6_sip0 = ntohl(addr_v6.s6_addr32[0]);
2205 tmp.ipv6_5t_route.ipv6_sip1 = ntohl(addr_v6.s6_addr32[1]);
2206 tmp.ipv6_5t_route.ipv6_sip2 = ntohl(addr_v6.s6_addr32[2]);
2207 tmp.ipv6_5t_route.ipv6_sip3 = ntohl(addr_v6.s6_addr32[3]);
2208 tmp.ipv6_5t_route.ipv6_dip0 = ntohl(prefix.s6_addr32[0]);
2209 tmp.ipv6_5t_route.ipv6_dip1 = ntohl(prefix.s6_addr32[1]);
2210 tmp.ipv6_5t_route.ipv6_dip2 = ntohl(prefix.s6_addr32[2]);
2211 tmp.ipv6_5t_route.ipv6_dip3 = ntohl(iph->daddr);
2212 }
2213
2214 pptr = skb_header_pointer(skb, protoff,
2215 sizeof(_ports), &_ports);
2216 if (unlikely(!pptr))
2217 return -1;
2218
2219 if (l2w) {
2220 tmp.ipv4_hnapt.sport = ntohs(pptr->src);
2221 tmp.ipv4_hnapt.dport = ntohs(pptr->dst);
2222 } else {
2223 tmp.ipv6_5t_route.sport = ntohs(pptr->src);
2224 tmp.ipv6_5t_route.dport = ntohs(pptr->dst);
2225 }
2226
2227 *hash = hnat_get_ppe_hash(&tmp);
2228
2229 return 0;
2230}
2231
2232void mtk_464xlat_fill_info1(struct foe_entry *entry,
2233 struct sk_buff *skb, bool l2w)
2234{
2235 entry->bfib1.cah = 1;
2236 entry->bfib1.ttl = 1;
2237 entry->bfib1.state = BIND;
2238 entry->bfib1.time_stamp = readl(hnat_priv->fe_base + 0x0010) & (0xFF);
2239 if (l2w) {
2240 entry->bfib1.pkt_type = IPV4_DSLITE;
2241 entry->bfib1.udp = ipv6_hdr(skb)->nexthdr ==
2242 IPPROTO_UDP ? 1 : 0;
2243 } else {
2244 entry->bfib1.pkt_type = IPV6_6RD;
2245 entry->bfib1.udp = ip_hdr(skb)->protocol ==
2246 IPPROTO_UDP ? 1 : 0;
2247 }
2248}
2249
2250void mtk_464xlat_fill_info2(struct foe_entry *entry, bool l2w)
2251{
2252 entry->ipv4_dslite.iblk2.mibf = 1;
2253 entry->ipv4_dslite.iblk2.port_ag = 0xF;
2254
2255 if (l2w)
2256 entry->ipv4_dslite.iblk2.dp = NR_GMAC2_PORT;
2257 else
2258 entry->ipv6_6rd.iblk2.dp = NR_GMAC1_PORT;
2259}
2260
2261void mtk_464xlat_fill_ipv4(struct foe_entry *entry, struct sk_buff *skb,
2262 struct foe_entry *foe, bool l2w)
2263{
2264 struct iphdr *iph;
2265
2266 if (l2w) {
2267 entry->ipv4_dslite.sip = foe->ipv4_dslite.sip;
2268 entry->ipv4_dslite.dip = foe->ipv4_dslite.dip;
2269 entry->ipv4_dslite.sport = foe->ipv4_dslite.sport;
2270 entry->ipv4_dslite.dport = foe->ipv4_dslite.dport;
2271 } else {
2272 iph = ip_hdr(skb);
2273 entry->ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
2274 entry->ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
2275 entry->ipv6_6rd.sport = foe->ipv6_6rd.sport;
2276 entry->ipv6_6rd.dport = foe->ipv6_6rd.dport;
2277 entry->ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
2278 entry->ipv6_6rd.ttl = iph->ttl;
2279 entry->ipv6_6rd.dscp = iph->tos;
2280 entry->ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
2281 }
2282}
2283
2284int mtk_464xlat_fill_ipv6(struct foe_entry *entry, struct sk_buff *skb,
2285 struct foe_entry *foe, bool l2w)
2286{
2287 struct ipv6hdr *ip6h;
2288 struct in6_addr addr_v6, prefix;
2289 u32 addr;
2290
2291 if (l2w) {
2292 ip6h = ipv6_hdr(skb);
2293
2294 if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
2295 return -1;
2296
2297 if (mtk_ppe_get_xlat_v6_by_v4(&addr, &addr_v6, &prefix))
2298 return -1;
2299
2300 entry->ipv4_dslite.tunnel_sipv6_0 =
2301 ntohl(prefix.s6_addr32[0]);
2302 entry->ipv4_dslite.tunnel_sipv6_1 =
2303 ntohl(ip6h->saddr.s6_addr32[1]);
2304 entry->ipv4_dslite.tunnel_sipv6_2 =
2305 ntohl(ip6h->saddr.s6_addr32[2]);
2306 entry->ipv4_dslite.tunnel_sipv6_3 =
2307 ntohl(ip6h->saddr.s6_addr32[3]);
2308 entry->ipv4_dslite.tunnel_dipv6_0 =
2309 ntohl(ip6h->daddr.s6_addr32[0]);
2310 entry->ipv4_dslite.tunnel_dipv6_1 =
2311 ntohl(ip6h->daddr.s6_addr32[1]);
2312 entry->ipv4_dslite.tunnel_dipv6_2 =
2313 ntohl(ip6h->daddr.s6_addr32[2]);
2314 entry->ipv4_dslite.tunnel_dipv6_3 =
2315 ntohl(ip6h->daddr.s6_addr32[3]);
2316
2317 ppe_fill_flow_lbl(entry, ip6h);
2318 entry->ipv4_dslite.priority = ip6h->priority;
2319 entry->ipv4_dslite.hop_limit = ip6h->hop_limit;
2320
2321 } else {
2322 entry->ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
2323 entry->ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
2324 entry->ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
2325 entry->ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
2326 entry->ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
2327 entry->ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
2328 entry->ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
2329 entry->ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
2330 }
2331
2332 return 0;
2333}
2334
2335int mtk_464xlat_fill_l2(struct foe_entry *entry, struct sk_buff *skb,
2336 const struct net_device *dev, bool l2w)
2337{
2338 const unsigned int *port_reg;
2339 int port_index;
2340 u16 sp_tag;
2341
2342 if (l2w)
2343 entry->ipv4_dslite.etype = ETH_P_IP;
2344 else {
2345 if (IS_DSA_LAN(dev)) {
2346 port_reg = of_get_property(dev->dev.of_node,
2347 "reg", NULL);
2348 if (unlikely(!port_reg))
2349 return -1;
2350
2351 port_index = be32_to_cpup(port_reg);
2352 sp_tag = BIT(port_index);
2353
2354 entry->bfib1.vlan_layer = 1;
2355 entry->bfib1.vpm = 0;
2356 entry->ipv6_6rd.etype = sp_tag;
2357 } else
2358 entry->ipv6_6rd.etype = ETH_P_IPV6;
2359 }
2360
2361 if (mtk_464xlat_fill_mac(entry, skb, dev, l2w))
2362 return -1;
2363
2364 return 0;
developer30a47682021-11-02 17:06:14 +08002365}
2366
developere8b7dfa2023-04-20 10:16:44 +08002367
2368int mtk_464xlat_fill_l3(struct foe_entry *entry, struct sk_buff *skb,
2369 struct foe_entry *foe, bool l2w)
2370{
2371 mtk_464xlat_fill_ipv4(entry, skb, foe, l2w);
2372
2373 if (mtk_464xlat_fill_ipv6(entry, skb, foe, l2w))
2374 return -1;
2375
2376 return 0;
2377}
2378
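/* 464XLAT post-routing entry point: find the FOE slot for the translated
 * flow and, while it is still in the HIT_UNBIND_RATE_REACH stage, build and
 * bind a DS-Lite/6RD style entry for it directly.
 */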
2379int mtk_464xlat_post_process(struct sk_buff *skb, const struct net_device *out)
2380{
2381 struct foe_entry *foe, entry = {};
2382 u32 hash;
2383 bool l2w;
2384
2385 if (skb->protocol == htons(ETH_P_IPV6))
2386 l2w = true;
2387 else if (skb->protocol == htons(ETH_P_IP))
2388 l2w = false;
2389 else
2390 return -1;
2391
2392 if (mtk_464xlat_get_hash(skb, &hash, l2w))
2393 return -1;
2394
2395 if (hash >= hnat_priv->foe_etry_num)
2396 return -1;
2397
2398 if (headroom[hash].crsn != HIT_UNBIND_RATE_REACH)
2399 return -1;
2400
2401 foe = &hnat_priv->foe_table_cpu[headroom_ppe(headroom[hash])][hash];
2402
2403 mtk_464xlat_fill_info1(&entry, skb, l2w);
2404
2405 if (mtk_464xlat_fill_l3(&entry, skb, foe, l2w))
2406 return -1;
2407
2408 mtk_464xlat_fill_info2(&entry, l2w);
2409
2410 if (mtk_464xlat_fill_l2(&entry, skb, out, l2w))
2411 return -1;
2412
2413 /* We must ensure all info has been updated before it is written to hw */
2414 wmb();
2415 memcpy(foe, &entry, sizeof(struct foe_entry));
2416
2417 return 0;
2418}
2419
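/* Common post-routing handler shared by the IPv4/IPv6/bridge hooks: unbound
 * flows are learned via the per-family nexthop helper (fn) and
 * skb_to_hnat_info(); keepalive packets refresh conntrack counters, DSCP and
 * the multicast timestamp instead.
 */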
developerfd40db22021-04-29 10:08:25 +08002420static unsigned int mtk_hnat_nf_post_routing(
2421 struct sk_buff *skb, const struct net_device *out,
2422 unsigned int (*fn)(struct sk_buff *, const struct net_device *,
2423 struct flow_offload_hw_path *),
2424 const char *func)
2425{
2426 struct foe_entry *entry;
2427 struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
developere5763512021-05-21 01:04:58 +08002428 .virt_dev = (struct net_device*)out };
developerfd40db22021-04-29 10:08:25 +08002429 const struct net_device *arp_dev = out;
2430
developere8b7dfa2023-04-20 10:16:44 +08002431 if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
2432 return 0;
2433
developerfd40db22021-04-29 10:08:25 +08002434 if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
2435 !IS_SPACE_AVAILABLE_HEAD(skb)))
2436 return 0;
2437
developerc0419aa2022-12-07 15:56:36 +08002438 if (unlikely(!skb_mac_header_was_set(skb)))
2439 return 0;
2440
developerfd40db22021-04-29 10:08:25 +08002441 if (unlikely(!skb_hnat_is_hashed(skb)))
2442 return 0;
2443
2444 if (out->netdev_ops->ndo_flow_offload_check) {
developere5763512021-05-21 01:04:58 +08002445 out->netdev_ops->ndo_flow_offload_check(&hw_path);
developerfd40db22021-04-29 10:08:25 +08002446 out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
2447 }
2448
developerd35bbcc2022-09-28 22:46:01 +08002449 if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
developerfd40db22021-04-29 10:08:25 +08002450 return 0;
2451
2452 trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
2453 skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
2454
developer577ad2f2022-11-28 10:33:36 +08002455 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2456 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2457 return -1;
2458
developer471f6562021-05-10 20:48:34 +08002459 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002460
2461 switch (skb_hnat_reason(skb)) {
2462 case HIT_UNBIND_RATE_REACH:
2463 if (entry_hnat_is_bound(entry))
2464 break;
2465
2466 if (fn && !mtk_hnat_accel_type(skb))
2467 break;
2468
2469 if (fn && fn(skb, arp_dev, &hw_path))
2470 break;
2471
2472 skb_to_hnat_info(skb, out, entry, &hw_path);
2473 break;
2474 case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
developer30a47682021-11-02 17:06:14 +08002475 /* sync hnat counters to nf_conntrack on keepalive */
2476 if (hnat_priv->data->per_flow_accounting && hnat_priv->nf_stat_en)
2477 mtk_hnat_nf_update(skb);
2478
developerfd40db22021-04-29 10:08:25 +08002479 if (fn && !mtk_hnat_accel_type(skb))
2480 break;
2481
developer6f4a0c72021-10-19 10:04:22 +08002482 /* update dscp for qos */
2483 mtk_hnat_dscp_update(skb, entry);
2484
developerfd40db22021-04-29 10:08:25 +08002485 /* update mcast timestamp */
developer4164cfe2022-12-01 11:27:41 +08002486 if (hnat_priv->data->version == MTK_HNAT_V1_3 &&
developerfd40db22021-04-29 10:08:25 +08002487 hnat_priv->data->mcast && entry->bfib1.sta == 1)
2488 entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
2489
2490 if (entry_hnat_is_bound(entry)) {
2491 memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
2492
2493 return -1;
2494 }
2495 break;
2496 case HIT_BIND_MULTICAST_TO_CPU:
2497 case HIT_BIND_MULTICAST_TO_GMAC_CPU:
2498 /* Do not forward to gdma again if the PPE has already done it */
developerd35bbcc2022-09-28 22:46:01 +08002499 if (IS_LAN_GRP(out) || IS_WAN(out))
developerfd40db22021-04-29 10:08:25 +08002500 return -1;
2501 break;
2502 }
2503
2504 return 0;
2505}
2506
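/* IPv6 local-out hook, mainly for DS-Lite / MAP-E upstream traffic: when the
 * payload is IPv4-in-IPv6 (NEXTHDR_IPIP), record the inner IPv4 header in
 * the FOE entry for MAP-E, or mark the entry as IPV4_DSLITE otherwise.
 */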
2507static unsigned int
2508mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
2509 const struct nf_hook_state *state)
2510{
2511 struct foe_entry *entry;
2512 struct ipv6hdr *ip6h;
2513 struct iphdr _iphdr;
2514 const struct iphdr *iph;
2515 struct tcpudphdr _ports;
2516 const struct tcpudphdr *pptr;
2517 int udp = 0;
2518
2519 if (unlikely(!skb_hnat_is_hashed(skb)))
2520 return NF_ACCEPT;
2521
developer577ad2f2022-11-28 10:33:36 +08002522 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2523 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2524 return NF_ACCEPT;
2525
developer471f6562021-05-10 20:48:34 +08002526 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002527 if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
2528 ip6h = ipv6_hdr(skb);
2529 if (ip6h->nexthdr == NEXTHDR_IPIP) {
2530 /* Map-E LAN->WAN: need to record orig info before fn. */
2531 if (mape_toggle) {
2532 iph = skb_header_pointer(skb, IPV6_HDR_LEN,
2533 sizeof(_iphdr), &_iphdr);
developer4c32b7a2021-11-13 16:46:43 +08002534 if (unlikely(!iph))
2535 return NF_ACCEPT;
2536
developerfd40db22021-04-29 10:08:25 +08002537 switch (iph->protocol) {
2538 case IPPROTO_UDP:
2539 udp = 1;
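/* fall through - TCP and UDP share the handling below */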
2540 case IPPROTO_TCP:
2541 break;
2542
2543 default:
2544 return NF_ACCEPT;
2545 }
2546
2547 pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
2548 sizeof(_ports), &_ports);
developer4c32b7a2021-11-13 16:46:43 +08002549 if (unlikely(!pptr))
2550 return NF_ACCEPT;
2551
developerfd40db22021-04-29 10:08:25 +08002552 entry->bfib1.udp = udp;
2553
developer25fc8c02022-05-06 16:24:02 +08002554 /* Map-E LAN->WAN: record the inner IPv4 header info. */
developerd35bbcc2022-09-28 22:46:01 +08002555#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08002556 entry->bfib1.pkt_type = IPV4_MAP_E;
2557 entry->ipv4_dslite.iblk2.dscp = iph->tos;
developerd35bbcc2022-09-28 22:46:01 +08002558 entry->ipv4_mape.new_sip = ntohl(iph->saddr);
2559 entry->ipv4_mape.new_dip = ntohl(iph->daddr);
2560 entry->ipv4_mape.new_sport = ntohs(pptr->src);
2561 entry->ipv4_mape.new_dport = ntohs(pptr->dst);
developerfd40db22021-04-29 10:08:25 +08002562#else
2563 entry->ipv4_hnapt.iblk2.dscp = iph->tos;
2564 entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
2565 entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
2566 entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
2567 entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
2568#endif
2569 } else {
2570 entry->bfib1.pkt_type = IPV4_DSLITE;
2571 }
2572 }
2573 }
2574 return NF_ACCEPT;
2575}
2576
2577static unsigned int
2578mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
2579 const struct nf_hook_state *state)
2580{
developer577ad2f2022-11-28 10:33:36 +08002581 if (!skb)
2582 goto drop;
2583
developerfd40db22021-04-29 10:08:25 +08002584 post_routing_print(skb, state->in, state->out, __func__);
2585
2586 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
2587 __func__))
2588 return NF_ACCEPT;
2589
developer577ad2f2022-11-28 10:33:36 +08002590drop:
2591 if (skb)
2592 trace_printk(
2593 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2594 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2595 __func__, skb_hnat_iface(skb), state->out->name,
2596 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2597 skb_hnat_sport(skb), skb_hnat_reason(skb),
2598 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002599
2600 return NF_DROP;
2601}
2602
2603static unsigned int
2604mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
2605 const struct nf_hook_state *state)
2606{
developer577ad2f2022-11-28 10:33:36 +08002607 if (!skb)
2608 goto drop;
2609
developerfd40db22021-04-29 10:08:25 +08002610 post_routing_print(skb, state->in, state->out, __func__);
2611
2612 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
2613 __func__))
2614 return NF_ACCEPT;
2615
developer577ad2f2022-11-28 10:33:36 +08002616drop:
2617 if (skb)
2618 trace_printk(
2619 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2620 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2621 __func__, skb_hnat_iface(skb), state->out->name,
2622 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2623 skb_hnat_sport(skb), skb_hnat_reason(skb),
2624 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002625
2626 return NF_DROP;
2627}
2628
2629static unsigned int
2630mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
2631 const struct nf_hook_state *state)
2632{
developer659fdeb2022-12-01 23:03:07 +08002633 struct vlan_ethhdr *veth;
2634
2635 if (!skb)
2636 goto drop;
2637
2638 veth = (struct vlan_ethhdr *)skb_mac_header(skb);
developerfd40db22021-04-29 10:08:25 +08002639
developer34028fb2022-01-11 13:51:29 +08002640 if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
developerfd40db22021-04-29 10:08:25 +08002641 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2642 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2643 }
developerfd40db22021-04-29 10:08:25 +08002644
2645 if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
2646 clr_from_extge(skb);
2647
2648 /* packets from external devices -> xxx, step 2, learning stage */
developeraf07fad2021-11-19 17:53:42 +08002649 if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
2650 (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
developerfd40db22021-04-29 10:08:25 +08002651 if (!do_hnat_ext_to_ge2(skb, __func__))
2652 return NF_STOLEN;
2653 goto drop;
2654 }
2655
2656 /* packets from ge -> external device */
2657 if (do_ge2ext_fast(state->in, skb)) {
2658 if (!do_hnat_ge_to_ext(skb, __func__))
2659 return NF_STOLEN;
2660 goto drop;
2661 }
2662
2663 return NF_ACCEPT;
developer577ad2f2022-11-28 10:33:36 +08002664
developerfd40db22021-04-29 10:08:25 +08002665drop:
developer577ad2f2022-11-28 10:33:36 +08002666 if (skb)
2667 printk_ratelimited(KERN_WARNING
2668 "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
2669 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2670 __func__, state->in->name, skb_hnat_iface(skb),
2671 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2672 skb_hnat_sport(skb), skb_hnat_reason(skb),
2673 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002674
2675 return NF_DROP;
2676}
2677
2678static unsigned int
2679mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
2680 const struct nf_hook_state *state)
2681{
developer577ad2f2022-11-28 10:33:36 +08002682 if (!skb)
2683 goto drop;
2684
developerfd40db22021-04-29 10:08:25 +08002685 post_routing_print(skb, state->in, state->out, __func__);
2686
2687 if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__))
2688 return NF_ACCEPT;
2689
developer577ad2f2022-11-28 10:33:36 +08002690drop:
2691 if (skb)
2692 trace_printk(
2693 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2694 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2695 __func__, skb_hnat_iface(skb), state->out->name,
2696 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2697 skb_hnat_sport(skb), skb_hnat_reason(skb),
2698 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002699
2700 return NF_DROP;
2701}
2702
2703static unsigned int
2704mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
2705 const struct nf_hook_state *state)
2706{
2707 struct sk_buff *new_skb;
2708 struct foe_entry *entry;
2709 struct iphdr *iph;
2710
2711 if (!skb_hnat_is_hashed(skb))
2712 return NF_ACCEPT;
2713
developer577ad2f2022-11-28 10:33:36 +08002714 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2715 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2716 return NF_ACCEPT;
2717
developer471f6562021-05-10 20:48:34 +08002718 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002719
2720 if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
2721 new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
2722 if (!new_skb) {
2723 dev_info(hnat_priv->dev, "%s:drop\n", __func__);
2724 return NF_DROP;
2725 }
2726 dev_kfree_skb(skb);
2727 skb = new_skb;
2728 }
2729
2730 /* Prevent locally generated flows from being bound. */
2731 iph = ip_hdr(skb);
2732 if (iph->protocol == IPPROTO_IPV6) {
2733 entry->udib1.pkt_type = IPV6_6RD;
2734 hnat_set_head_frags(state, skb, 0, hnat_set_alg);
2735 } else {
2736 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2737 }
2738
2739 return NF_ACCEPT;
2740}
2741
2742static unsigned int mtk_hnat_br_nf_forward(void *priv,
2743 struct sk_buff *skb,
2744 const struct nf_hook_state *state)
2745{
developer4164cfe2022-12-01 11:27:41 +08002746 if ((hnat_priv->data->version == MTK_HNAT_V1_2) &&
developer99506e52021-06-30 22:03:02 +08002747 unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
developerfd40db22021-04-29 10:08:25 +08002748 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2749
2750 return NF_ACCEPT;
2751}
2752
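/* Netfilter hooks used by HNAT: IPv4/IPv6 pre-routing, post-routing and
 * local-out, plus bridge-layer hooks for traffic that never enters the IP
 * stack (extif learning and HQoS handling).
 */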
2753static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
2754 {
2755 .hook = mtk_hnat_ipv4_nf_pre_routing,
2756 .pf = NFPROTO_IPV4,
2757 .hooknum = NF_INET_PRE_ROUTING,
2758 .priority = NF_IP_PRI_FIRST + 1,
2759 },
2760 {
2761 .hook = mtk_hnat_ipv6_nf_pre_routing,
2762 .pf = NFPROTO_IPV6,
2763 .hooknum = NF_INET_PRE_ROUTING,
2764 .priority = NF_IP_PRI_FIRST + 1,
2765 },
2766 {
2767 .hook = mtk_hnat_ipv6_nf_post_routing,
2768 .pf = NFPROTO_IPV6,
2769 .hooknum = NF_INET_POST_ROUTING,
2770 .priority = NF_IP_PRI_LAST,
2771 },
2772 {
2773 .hook = mtk_hnat_ipv6_nf_local_out,
2774 .pf = NFPROTO_IPV6,
2775 .hooknum = NF_INET_LOCAL_OUT,
2776 .priority = NF_IP_PRI_LAST,
2777 },
2778 {
2779 .hook = mtk_hnat_ipv4_nf_post_routing,
2780 .pf = NFPROTO_IPV4,
2781 .hooknum = NF_INET_POST_ROUTING,
2782 .priority = NF_IP_PRI_LAST,
2783 },
2784 {
2785 .hook = mtk_hnat_ipv4_nf_local_out,
2786 .pf = NFPROTO_IPV4,
2787 .hooknum = NF_INET_LOCAL_OUT,
2788 .priority = NF_IP_PRI_LAST,
2789 },
2790 {
2791 .hook = mtk_hnat_br_nf_local_in,
2792 .pf = NFPROTO_BRIDGE,
2793 .hooknum = NF_BR_LOCAL_IN,
2794 .priority = NF_BR_PRI_FIRST,
2795 },
2796 {
2797 .hook = mtk_hnat_br_nf_local_out,
2798 .pf = NFPROTO_BRIDGE,
2799 .hooknum = NF_BR_LOCAL_OUT,
2800 .priority = NF_BR_PRI_LAST - 1,
2801 },
2802 {
2803 .hook = mtk_pong_hqos_handler,
2804 .pf = NFPROTO_BRIDGE,
2805 .hooknum = NF_BR_PRE_ROUTING,
developer2b85f652021-08-19 16:09:50 +08002806 .priority = NF_BR_PRI_FIRST + 1,
developerfd40db22021-04-29 10:08:25 +08002807 },
2808};
2809
2810int hnat_register_nf_hooks(void)
2811{
2812 return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2813}
2814
2815void hnat_unregister_nf_hooks(void)
2816{
2817 nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2818}
2819
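/* Rehome the bridge hooks for the whnat (Wi-Fi warp) configuration: local-in
 * moves to pre-routing, local-out moves to post-routing, and the HQoS pong
 * handler is replaced by the bridge-forward handler.
 */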
2820int whnat_adjust_nf_hooks(void)
2821{
2822 struct nf_hook_ops *hook = mtk_hnat_nf_ops;
2823 unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);
2824
developerfd40db22021-04-29 10:08:25 +08002825 while (n-- > 0) {
2826 if (hook[n].hook == mtk_hnat_br_nf_local_in) {
2827 hook[n].hooknum = NF_BR_PRE_ROUTING;
developer2b85f652021-08-19 16:09:50 +08002828 hook[n].priority = NF_BR_PRI_FIRST + 1;
developerfd40db22021-04-29 10:08:25 +08002829 } else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
2830 hook[n].hooknum = NF_BR_POST_ROUTING;
2831 } else if (hook[n].hook == mtk_pong_hqos_handler) {
2832 hook[n].hook = mtk_hnat_br_nf_forward;
2833 hook[n].hooknum = NF_BR_FORWARD;
2834 hook[n].priority = NF_BR_PRI_LAST - 1;
2835 }
2836 }
2837
2838 return 0;
2839}
2840
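/* packet_type callback used in HQoS mode: the FOE entry index is carried in
 * the VLAN TCI of the HQOS magic tag; recover it, mark the skb as
 * force-to-CPU and pass it to do_hnat_ge_to_ext().
 */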
developerfd40db22021-04-29 10:08:25 +08002841int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
2842 struct packet_type *pt, struct net_device *unused)
2843{
2844 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2845
2846 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2847 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2848
developer659fdeb2022-12-01 23:03:07 +08002849 if (do_hnat_ge_to_ext(skb, __func__) == -1)
2850 return 1;
developerfd40db22021-04-29 10:08:25 +08002851
2852 return 0;
2853}
developerfd40db22021-04-29 10:08:25 +08002854