/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
 */

#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>

#include <net/arp.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>

#include "nf_hnat_mtk.h"
#include "hnat.h"

#include "../mtk_eth_soc.h"
#include "../mtk_eth_reset.h"

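/*
 * Helper macros for the HNAT fast paths: do_ge2ext_fast() matches packets
 * that the PPE has already bound and forced to the CPU on a GMAC/WAN/PPD
 * interface, do_ext2ge_fast_learn() matches PPD traffic whose VLAN ID maps
 * to a registered extension (or WAN) interface, and do_mape_w2l_fast()
 * matches downstream WAN traffic when MAP-E handling is enabled.
 */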
#define do_ge2ext_fast(dev, skb)					\
	((IS_LAN_GRP(dev) || IS_WAN(dev) || IS_PPD(dev)) &&		\
	 skb_hnat_is_hashed(skb) &&					\
	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
#define do_ext2ge_fast_learn(dev, skb)					\
	(IS_PPD(dev) &&							\
	 (skb_hnat_sport(skb) == NR_PDMA_PORT ||			\
	  skb_hnat_sport(skb) == NR_QDMA_PORT) &&			\
	 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) ||	\
	  get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
#define do_mape_w2l_fast(dev, skb)					\
	(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))

static struct ipv6hdr mape_l2w_v6h;
static struct ipv6hdr mape_w2l_v6h;

static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
{
	int i;

	for (i = 1; i < MAX_IF_NUM; i++) {
		if (hnat_priv->wifi_hook_if[i] == dev)
			return i;
	}

	return 0;
}

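/*
 * Extension ("ext") interfaces -- typically Wi-Fi netdevs -- are tracked in
 * hnat_priv->ext_if[]. The helpers below look entries up by name, device
 * pointer or ifindex, and hold/release the struct net_device reference when
 * the interface comes up or goes down.
 */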
static inline int get_ext_device_number(void)
{
	int i, number = 0;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++)
		number += 1;
	return number;
}

static inline int find_extif_from_devname(const char *name)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(name, ext_entry->name))
			return 1;
	}
	return 0;
}

static inline int get_index_from_dev(const struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (dev == ext_entry->dev)
			return ext_entry->dev->ifindex;
	}
	return 0;
}

static inline struct net_device *get_dev_from_index(int index)
{
	int i;
	struct extdev_entry *ext_entry;
	struct net_device *dev = NULL;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev && index == ext_entry->dev->ifindex) {
			dev = ext_entry->dev;
			break;
		}
	}
	return dev;
}

static inline struct net_device *get_wandev_from_index(int index)
{
	if (!hnat_priv->g_wandev)
		hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

	if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index)
		return hnat_priv->g_wandev;
	return NULL;
}

static inline int extif_set_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
			dev_hold(dev);
			ext_entry->dev = dev;
			pr_info("%s(%s)\n", __func__, dev->name);

			return ext_entry->dev->ifindex;
		}
	}

	return -1;
}

static inline int extif_put_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev == dev) {
			ext_entry->dev = NULL;
			dev_put(dev);
			pr_info("%s(%s)\n", __func__, dev->name);

			return 0;
		}
	}

	return -1;
}

int ext_if_add(struct extdev_entry *ext_entry)
{
	int len = get_ext_device_number();

	if (len < MAX_EXT_DEVS)
		hnat_priv->ext_if[len++] = ext_entry;

	return len;
}

int ext_if_del(struct extdev_entry *ext_entry)
{
	int i, j;

	for (i = 0; i < MAX_EXT_DEVS; i++) {
		if (hnat_priv->ext_if[i] == ext_entry) {
			for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++)
				hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
			hnat_priv->ext_if[j] = NULL;
			break;
		}
	}

	return i;
}

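/*
 * Invalidate every bound FOE entry in all PPEs when a relevant interface
 * (LAN/WAN, a registered extension device, or a device providing
 * ndo_flow_offload_check) goes down, then flush the HWNAT cache and rearm
 * the SMA build-entry timer.
 */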
void foe_clear_all_bind_entries(struct net_device *dev)
{
	int i, hash_index;
	struct foe_entry *entry;

	if (!IS_LAN_GRP(dev) && !IS_WAN(dev) &&
	    !find_extif_from_devname(dev->name) &&
	    !dev->netdev_ops->ndo_flow_offload_check)
		return;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_ONLY_FWD_CPU);

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND) {
				entry->ipv4_hnapt.udib1.state = INVALID;
				entry->ipv4_hnapt.udib1.time_stamp =
					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
			}
		}
	}

	/* clear HWNAT cache */
	hnat_cache_ebl(1);

	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
}

static void gmac_ppe_fwd_enable(struct net_device *dev)
{
	if (IS_LAN(dev) || IS_GMAC1_MODE)
		set_gmac_ppe_fwd(NR_GMAC1_PORT, 1);
	else if (IS_WAN(dev))
		set_gmac_ppe_fwd(NR_GMAC2_PORT, 1);
	else if (IS_LAN2(dev))
		set_gmac_ppe_fwd(NR_GMAC3_PORT, 1);
}

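/*
 * Netdevice notifier: enable PPE forwarding and bind extension devices on
 * NETDEV_UP, release references and flush bound entries on
 * NETDEV_GOING_DOWN/UNREGISTER, cache the PPD/WAN devices on
 * NETDEV_REGISTER, and re-run hnat_warm_init() when the frame engine
 * signals MTK_FE_RESET_NAT_DONE.
 */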
int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
			    void *ptr)
{
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		gmac_ppe_fwd_enable(dev);

		extif_set_dev(dev);

		break;
	case NETDEV_GOING_DOWN:
		if (!get_wifi_hook_if_index_from_dev(dev))
			extif_put_dev(dev);

		foe_clear_all_bind_entries(dev);

		break;
	case NETDEV_UNREGISTER:
		if (hnat_priv->g_ppdev == dev) {
			hnat_priv->g_ppdev = NULL;
			dev_put(dev);
		}
		if (hnat_priv->g_wandev == dev) {
			hnat_priv->g_wandev = NULL;
			dev_put(dev);
		}

		break;
	case NETDEV_REGISTER:
		if (IS_PPD(dev) && !hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
		if (IS_WAN(dev) && !hnat_priv->g_wandev)
			hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

		break;
	case MTK_FE_RESET_NAT_DONE:
		pr_info("[%s] HNAT driver starts to do warm init !\n", __func__);
		hnat_warm_init();
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

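/*
 * When a neighbour entry changes, walk the FOE tables and invalidate any
 * bound IPv4 entry whose new destination IP matches but whose cached DMAC
 * no longer matches the neighbour's hardware address, so the PPE relearns
 * the flow with the new MAC.
 */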
void foe_clear_entry(struct neighbour *neigh)
{
	u32 *daddr = (u32 *)neigh->primary_key;
	unsigned char h_dest[ETH_ALEN];
	struct foe_entry *entry;
	int i, hash_index;
	u32 dip;

	dip = (u32)(*daddr);

	for (i = 0; i < CFG_PPE_NUM; i++) {
		if (!hnat_priv->foe_table_cpu[i])
			continue;

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND &&
			    entry->ipv4_hnapt.new_dip == ntohl(dip)) {
				*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
				*((u16 *)&h_dest[4]) =
					swab16(entry->ipv4_hnapt.dmac_lo);
				if (strncmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
					pr_info("%s: state=%d\n", __func__,
						neigh->nud_state);
					cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
						     SMA, SMA_ONLY_FWD_CPU);

					entry->ipv4_hnapt.udib1.state = INVALID;
					entry->ipv4_hnapt.udib1.time_stamp =
						readl((hnat_priv->fe_base + 0x0010)) & 0xFF;

					/* clear HWNAT cache */
					hnat_cache_ebl(1);

					mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
						  jiffies + 3 * HZ);

					pr_info("Delete old entry: dip =%pI4\n", &dip);
					pr_info("Old mac= %pM\n", h_dest);
					pr_info("New mac= %pM\n", neigh->ha);
				}
			}
		}
	}
}

int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = NULL;
	struct neighbour *neigh = NULL;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		neigh = ptr;
		dev = neigh->dev;
		if (dev)
			foe_clear_entry(neigh);
		break;
	}

	return NOTIFY_DONE;
}

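/*
 * Prepend a previously saved outer IPv6 header to an IPv4 packet for MAP-E
 * pingpong traffic. The skb is left untouched (and -1 returned) when there
 * is not enough headroom or the buffer is shared; otherwise the Ethernet
 * protocol and the network/transport header offsets are updated for the new
 * IPv6 encapsulation.
 */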
unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
{
	struct ethhdr *eth = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr *iph = NULL;

	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		return -1;
	}

	/* point to L3 */
	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);

	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	eth->h_proto = htons(ETH_P_IPV6);
	skb->protocol = htons(ETH_P_IPV6);

	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
	ip6h = (struct ipv6hdr *)(skb->data);
	ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */

	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
	return 0;
}

static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
				struct ethhdr *eth)
{
	skb->pkt_type = PACKET_HOST;
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
}

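/*
 * Fast path for traffic received on an extension interface: tag the packet
 * with a VLAN ID carrying the ingress ifindex and requeue it on the PPD
 * device (g_ppdev) so the PPE can learn the flow. Returns 0 when the packet
 * has been taken over, -1 otherwise.
 */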
unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
			       const char *func)
{
	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
		u16 vlan_id = 0;

		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		set_to_ppe(skb);

		vlan_id = skb_vlan_tag_get_id(skb);
		if (vlan_id) {
			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
			if (!skb)
				return -1;
		}

		/* set where we come from */
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
		trace_printk(
			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
			in->name, hnat_priv->g_ppdev->name);
		skb->dev = hnat_priv->g_ppdev;
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__, func);
		return 0;
	}

	trace_printk("%s: called from %s fail\n", __func__, func);
	return -1;
}

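/*
 * Second stage of the ext->GE learning path: the VLAN tag added by
 * do_hnat_ext_to_ge() is resolved back to the original extension device and
 * the packet is re-injected there via netif_rx(). If the tag maps to the
 * WAN device instead, the MAP-E WAN->LAN pingpong handling is applied.
 */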
unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct net_device *dev;
	struct foe_entry *entry;

	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
		     ntohs(skb->vlan_proto), skb->vlan_tci);

	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);

	if (dev) {
		/* set where we go to */
		skb->dev = dev;
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;

		if (ntohs(eth->h_proto) == ETH_P_8021Q) {
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				return -1;
		}

		if (IS_BOND_MODE &&
		    (((hnat_priv->data->version == MTK_HNAT_V4 ||
		       hnat_priv->data->version == MTK_HNAT_V5) &&
		      (skb_hnat_entry(skb) != 0x7fff)) ||
		     ((hnat_priv->data->version != MTK_HNAT_V4 &&
		       hnat_priv->data->version != MTK_HNAT_V5) &&
		      (skb_hnat_entry(skb) != 0x3fff))))
			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);

		set_from_extge(skb);
		fix_skb_packet_type(skb, skb->dev, eth);
		netif_rx(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		/* MapE WAN --> LAN/WLAN PingPong. */
		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
		if (mape_toggle && dev) {
			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = dev;
				set_from_mape(skb);
				skb->vlan_proto = 0;
				skb->vlan_tci = 0;
				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
				entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
				entry->bfib1.pkt_type = IPV4_HNAPT;
				netif_rx(skb);
				return 0;
			}
		}
		trace_printk("%s: called from %s fail\n", __func__, func);
		return -1;
	}
}

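/*
 * Fast path for bound flows leaving through an extension interface: the
 * egress device is recovered from the act_dp field of the FOE entry and the
 * packet is transmitted there directly. With MAP-E enabled, LAN->WAN
 * traffic gets its saved outer IPv6 header restored and is sent on the WAN
 * device; if the target device is gone, the entry is invalidated.
 */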
unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
{
	/* set where we go to */
	u8 index;
	struct foe_entry *entry;
	struct net_device *dev;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	if (IS_IPV4_GRP(entry))
		index = entry->ipv4_hnapt.act_dp;
	else
		index = entry->ipv6_5t_route.act_dp;

	skb->dev = get_dev_from_index(index);

	if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NF_ACCEPT;

		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
			return NF_ACCEPT;

		skb_pull_rcsum(skb, VLAN_HLEN);

		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
			2 * ETH_ALEN);
	}

	if (skb->dev) {
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		if (mape_toggle) {
			/* Add MAP-E ipv6 header for lan/wlan --> wan */
			dev = get_wandev_from_index(index);
			if (dev) {
				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
					skb_set_network_header(skb, 0);
					skb_push(skb, ETH_HLEN);
					skb_set_mac_header(skb, 0);
					skb->dev = dev;
					dev_queue_xmit(skb);
					return 0;
				}
				trace_printk("%s: called from %s fail[MapE]\n", __func__,
					     func);
				return -1;
			}
		}
	}
	/* if the external device is down, invalidate the related PPE entry */
	if (entry_hnat_is_bound(entry)) {
		entry->bfib1.state = INVALID;
		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.act_dp = 0;
		else
			entry->ipv6_5t_route.act_dp = 0;

		/* clear HWNAT cache */
		hnat_cache_ebl(1);
	}
	trace_printk("%s: called from %s fail, index=%x\n", __func__,
		     func, index);
	return -1;
}

static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
			      const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
			       const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

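/*
 * Record the ingress interface class (FOE_MAGIC_GE_LAN/LAN2/WAN/PPD, EXT,
 * virtual, or invalid) in the skb's HNAT control block so later hooks know
 * where the packet entered; WED traffic keeps whatever tag the Wi-Fi driver
 * already set.
 */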
static inline void hnat_set_iif(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	if (IS_WHNAT(state->in) && FROM_WED(skb)) {
		return;
	} else if (IS_LAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
	} else if (IS_LAN2(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN2;
	} else if (IS_PPD(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
	} else if (IS_EXT(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
	} else if (IS_WAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
	} else if (!IS_BR(state->in)) {
		if (state->in->netdev_ops->ndo_flow_offload_check) {
			skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
		} else {
			skb_hnat_iface(skb) = FOE_INVALID;

			if (is_magic_tag_valid(skb) &&
			    IS_SPACE_AVAILABLE_HEAD(skb))
				memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
		}
	}
}

static inline void hnat_set_alg(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	skb_hnat_alg(skb) = val;
}

static inline void hnat_set_head_frags(const struct nf_hook_state *state,
				       struct sk_buff *head_skb, int val,
				       void (*fn)(const struct nf_hook_state *state,
						  struct sk_buff *skb, int val))
{
	struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;

	fn(state, head_skb, val);
	while (segs) {
		fn(state, segs, val);
		segs = segs->next;
	}
}

static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
{
	entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
	entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
	entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
}

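/*
 * MAP-E WAN->LAN fast path: for TCP/UDP-in-IPv6 (IPIP) packets arriving on
 * the WAN device, save the outer IPv6 header in mape_w2l_v6h, strip it,
 * tag the packet with the ingress ifindex and requeue it on the PPD device
 * as plain IPv4 so the PPE can bind the inner flow.
 */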
unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
				   const char *func)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct iphdr _iphdr;
	struct iphdr *iph;
	struct ethhdr *eth;

	/* WAN -> LAN/WLAN MapE. */
	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
		if (unlikely(!iph))
			return -1;

		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			break;
		default:
			return -1;
		}
		mape_w2l_v6h = *ip6h;

		/* Remove ipv6 header. */
		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
		       skb->data - ETH_HLEN, ETH_HLEN);
		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
		skb_set_mac_header(skb, 0);
		skb_set_network_header(skb, ETH_HLEN);
		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));

		eth = eth_hdr(skb);
		eth->h_proto = htons(ETH_P_IP);
		set_to_ppe(skb);

		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));

		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		skb->dev = hnat_priv->g_ppdev;
		skb->protocol = htons(ETH_P_IP);

		dev_queue_xmit(skb);

		return 0;
	}
	return -1;
}

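/*
 * Decide whether a packet is a candidate for PPE acceleration: the HNAT
 * tag must be valid, the destination must not be broadcast, and the payload
 * must be IPv4 TCP/UDP or IPv6-in-IPv4, IPv6 TCP/UDP or IPv4-in-IPv6, or an
 * 802.1Q frame. Callers set the ALG flag for everything else.
 */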
static unsigned int is_ppe_support_type(struct sk_buff *skb)
{
	struct ethhdr *eth = NULL;
	struct iphdr *iph = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr _iphdr;

	eth = eth_hdr(skb);
	if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) ||
	    is_broadcast_ether_addr(eth->h_dest))
		return 0;

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);

		/* do not accelerate non tcp/udp traffic */
		if ((iph->protocol == IPPROTO_TCP) ||
		    (iph->protocol == IPPROTO_UDP) ||
		    (iph->protocol == IPPROTO_IPV6)) {
			return 1;
		}

		break;
	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);

		if ((ip6h->nexthdr == NEXTHDR_TCP) ||
		    (ip6h->nexthdr == NEXTHDR_UDP)) {
			return 1;
		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
			iph = skb_header_pointer(skb, IPV6_HDR_LEN,
						 sizeof(_iphdr), &_iphdr);
			if (unlikely(!iph))
				return 0;

			if ((iph->protocol == IPPROTO_TCP) ||
			    (iph->protocol == IPPROTO_UDP)) {
				return 1;
			}
		}

		break;
	case ETH_P_8021Q:
		return 1;
	}

	return 0;
}

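/*
 * IPv4/IPv6 pre-routing hooks: tag the ingress interface, then steal packets
 * that belong to the ext<->GE fast paths (and, for IPv6 on builds without
 * NETSYS v2/v3, the MAP-E WAN->LAN path); everything else continues through
 * the normal netfilter path.
 */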
static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx, step 1: learning stage & bound stage */
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets from GE -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE needs to remove the ipv6 header and pingpong. */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	if (is_from_mape(skb))
		clr_from_extge(skb);
#endif
	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx, step 1: learning stage & bound stage */
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets from GE -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}

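/*
 * Bridge local-in hook: recover the FOE hash from the HQoS magic VLAN tag
 * when hardware QoS is active, then run the same ext<->GE learning and
 * fast-forward steps as the routed hooks for bridged traffic, including the
 * MAP-E pingpong on builds without NETSYS v2/v3.
 */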
static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct vlan_ethhdr *veth;

	if (IS_HQOS_MODE && hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* packets from external devices -> xxx, step 1: learning stage & bound stage */
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx, step 2: learning stage */
		if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
		    (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets from GE -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
	/* MapE needs to remove the ipv6 header and pingpong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}
#endif
	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}

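/*
 * Resolve the next-hop neighbour for a routed packet and rewrite the
 * Ethernet source/destination MACs accordingly (or copy the PPPoE endpoint
 * MACs when the offload path is PPPoE). Returns -1 when no usable neighbour
 * entry exists yet.
 */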
static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/* copy ether type for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}

static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	u32 nexthop;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = (__force struct net_device *)out;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
			   &ip_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	return 0;
}

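/*
 * Pre-compute the checksum base the PPE uses when it rebuilds the outer
 * IPv4 header (e.g. for the 6RD tunnel entry): fold the sum of the
 * complemented check, tot_len and id fields down to 16 bits.
 */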
static u16 ppe_get_chkbase(struct iphdr *iph)
{
	u16 org_chksum = ntohs(iph->check);
	u16 org_tot_len = ntohs(iph->tot_len);
	u16 org_id = ntohs(iph->id);
	u16 chksum_tmp, tot_len_tmp, id_tmp;
	u32 tmp = 0;
	u16 chksum_base = 0;

	chksum_tmp = ~(org_chksum);
	tot_len_tmp = ~(org_tot_len);
	id_tmp = ~(org_id);
	tmp = chksum_tmp + tot_len_tmp + id_tmp;
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	chksum_base = tmp & 0xFFFF;

	return chksum_base;
}

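/*
 * Copy the Ethernet source/destination MACs (stored byte-swapped, as the
 * PPE expects) and the PPPoE session ID into the FOE entry, picking the
 * IPv4 or IPv6 layout according to the entry's packet type.
 */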
struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
				  struct flow_offload_hw_path *hw_path)
{
	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv6_5t_route.smac_lo =
			swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
		break;
	}
	return entry;
}

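/*
 * Fill the first info block of the FOE entry: PPPoE/VLAN insertion flags,
 * cache enable, the hardware timestamp (whose width depends on the HNAT
 * version), multicast handling, and the port age-group field.
 */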
struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
				   struct flow_offload_hw_path *hw_path)
{
	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
	entry.bfib1.cah = 1;
	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4 ||
				  hnat_priv->data->version == MTK_HNAT_V5) ?
		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);

	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv4_hnapt.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv4_hnapt.iblk2.mcast = 0;
		}

		entry.ipv4_hnapt.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4 ||
			 hnat_priv->data->version == MTK_HNAT_V5) ? 0xf : 0x3f;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
	case IPV6_HNAPT:
	case IPV6_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv6_5t_route.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv6_5t_route.iblk2.mcast = 0;
		}

		entry.ipv6_5t_route.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4 ||
			 hnat_priv->data->version == MTK_HNAT_V5) ? 0xf : 0x3f;
		break;
	}
	return entry;
}

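/*
 * skb_to_hnat_info() below translates the skb and the unbound FOE entry
 * into a complete hardware entry: L3/L4 addresses and ports per packet type
 * (IPv4 NAPT, DS-Lite, MAP-E, 6RD, IPv6 routes), VLAN/QoS fields and the
 * destination port, before copying the result back into the PPE table.
 */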
developerfd40db22021-04-29 10:08:25 +08001106static unsigned int skb_to_hnat_info(struct sk_buff *skb,
1107 const struct net_device *dev,
1108 struct foe_entry *foe,
1109 struct flow_offload_hw_path *hw_path)
1110{
1111 struct foe_entry entry = { 0 };
1112 int whnat = IS_WHNAT(dev);
1113 struct ethhdr *eth;
1114 struct iphdr *iph;
1115 struct ipv6hdr *ip6h;
1116 struct tcpudphdr _ports;
1117 const struct tcpudphdr *pptr;
developer5ffc5f12022-10-25 18:51:46 +08001118 struct nf_conn *ct;
1119 enum ip_conntrack_info ctinfo;
developerfd40db22021-04-29 10:08:25 +08001120 u32 gmac = NR_DISCARD;
1121 int udp = 0;
1122 u32 qid = 0;
developeraf07fad2021-11-19 17:53:42 +08001123 u32 port_id = 0;
developerfd40db22021-04-29 10:08:25 +08001124 int mape = 0;
1125
developer5ffc5f12022-10-25 18:51:46 +08001126 ct = nf_ct_get(skb, &ctinfo);
1127
developerfd40db22021-04-29 10:08:25 +08001128 if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
1129 /* point to ethernet header for DS-Lite and MapE */
1130 eth = (struct ethhdr *)(skb->data - ETH_HLEN);
1131 else
1132 eth = eth_hdr(skb);
developer8116b0a2021-08-23 18:07:20 +08001133
1134 /*do not bind multicast if PPE mcast not enable*/
1135 if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest))
1136 return 0;
developerfd40db22021-04-29 10:08:25 +08001137
1138 entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
developerf94d8862022-03-29 10:11:17 +08001139 entry.bfib1.state = foe->udib1.state;
1140
developerd35bbcc2022-09-28 22:46:01 +08001141#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08001142 entry.bfib1.sp = foe->udib1.sp;
1143#endif
1144
1145 switch (ntohs(eth->h_proto)) {
1146 case ETH_P_IP:
1147 iph = ip_hdr(skb);
1148 switch (iph->protocol) {
1149 case IPPROTO_UDP:
1150 udp = 1;
1151 /* fallthrough */
1152 case IPPROTO_TCP:
1153 entry.ipv4_hnapt.etype = htons(ETH_P_IP);
1154
1155 /* DS-Lite WAN->LAN */
1156 if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
1157 entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
1158 entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
1159 entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
1160 entry.ipv4_dslite.sport =
1161 foe->ipv4_dslite.sport;
1162 entry.ipv4_dslite.dport =
1163 foe->ipv4_dslite.dport;
1164
developerd35bbcc2022-09-28 22:46:01 +08001165#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08001166 if (entry.bfib1.pkt_type == IPV4_MAP_E) {
1167 pptr = skb_header_pointer(skb,
1168 iph->ihl * 4,
1169 sizeof(_ports),
1170 &_ports);
developer4c32b7a2021-11-13 16:46:43 +08001171 if (unlikely(!pptr))
1172 return -1;
developerfd40db22021-04-29 10:08:25 +08001173
developerd35bbcc2022-09-28 22:46:01 +08001174 entry.ipv4_mape.new_sip =
developerfd40db22021-04-29 10:08:25 +08001175 ntohl(iph->saddr);
developerd35bbcc2022-09-28 22:46:01 +08001176 entry.ipv4_mape.new_dip =
developerfd40db22021-04-29 10:08:25 +08001177 ntohl(iph->daddr);
developerd35bbcc2022-09-28 22:46:01 +08001178 entry.ipv4_mape.new_sport =
developerfd40db22021-04-29 10:08:25 +08001179 ntohs(pptr->src);
developerd35bbcc2022-09-28 22:46:01 +08001180 entry.ipv4_mape.new_dport =
developerfd40db22021-04-29 10:08:25 +08001181 ntohs(pptr->dst);
1182 }
1183#endif
1184
1185 entry.ipv4_dslite.tunnel_sipv6_0 =
1186 foe->ipv4_dslite.tunnel_sipv6_0;
1187 entry.ipv4_dslite.tunnel_sipv6_1 =
1188 foe->ipv4_dslite.tunnel_sipv6_1;
1189 entry.ipv4_dslite.tunnel_sipv6_2 =
1190 foe->ipv4_dslite.tunnel_sipv6_2;
1191 entry.ipv4_dslite.tunnel_sipv6_3 =
1192 foe->ipv4_dslite.tunnel_sipv6_3;
1193
1194 entry.ipv4_dslite.tunnel_dipv6_0 =
1195 foe->ipv4_dslite.tunnel_dipv6_0;
1196 entry.ipv4_dslite.tunnel_dipv6_1 =
1197 foe->ipv4_dslite.tunnel_dipv6_1;
1198 entry.ipv4_dslite.tunnel_dipv6_2 =
1199 foe->ipv4_dslite.tunnel_dipv6_2;
1200 entry.ipv4_dslite.tunnel_dipv6_3 =
1201 foe->ipv4_dslite.tunnel_dipv6_3;
1202
1203 entry.ipv4_dslite.bfib1.rmt = 1;
1204 entry.ipv4_dslite.iblk2.dscp = iph->tos;
1205 entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
1206 if (hnat_priv->data->per_flow_accounting)
1207 entry.ipv4_dslite.iblk2.mibf = 1;
1208
1209 } else {
1210 entry.ipv4_hnapt.iblk2.dscp = iph->tos;
1211 if (hnat_priv->data->per_flow_accounting)
1212 entry.ipv4_hnapt.iblk2.mibf = 1;
1213
1214 entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;
1215
developerd35bbcc2022-09-28 22:46:01 +08001216 if (skb->vlan_tci && FROM_GE_WAN(skb) &&
1217 IS_LAN_GRP(dev)) {
developerfd40db22021-04-29 10:08:25 +08001218 entry.bfib1.vlan_layer += 1;
1219
1220 if (entry.ipv4_hnapt.vlan1)
1221 entry.ipv4_hnapt.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
1222 else
1223 entry.ipv4_hnapt.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
1224 }
1225
1226 entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
1227 entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
1228 entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
1229 entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
1230
1231 entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
1232 entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
1233 }
1234
1235 entry.ipv4_hnapt.bfib1.udp = udp;
1236 if (IS_IPV4_HNAPT(foe)) {
1237 pptr = skb_header_pointer(skb, iph->ihl * 4,
1238 sizeof(_ports),
1239 &_ports);
developer4c32b7a2021-11-13 16:46:43 +08001240 if (unlikely(!pptr))
1241 return -1;
1242
developerfd40db22021-04-29 10:08:25 +08001243 entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
1244 entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
1245 }
1246
1247 break;
1248
1249 default:
1250 return -1;
1251 }
1252 trace_printk(
1253 "[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
1254 __func__, skb->head, skb->data, iph, skb->len,
1255 skb->data_len);
1256 break;
1257
1258 case ETH_P_IPV6:
1259 ip6h = ipv6_hdr(skb);
1260 switch (ip6h->nexthdr) {
1261 case NEXTHDR_UDP:
1262 udp = 1;
1263 /* fallthrough */
1264 case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
1265 entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);
1266
1267 entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;
1268
developerd35bbcc2022-09-28 22:46:01 +08001269 if (skb->vlan_tci && FROM_GE_WAN(skb) &&
1270 IS_LAN_GRP(dev)) {
developerfd40db22021-04-29 10:08:25 +08001271 entry.bfib1.vlan_layer += 1;
1272
1273 if (entry.ipv6_5t_route.vlan1)
1274 entry.ipv6_5t_route.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
1275 else
1276 entry.ipv6_5t_route.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
1277 }
1278
1279 if (hnat_priv->data->per_flow_accounting)
1280 entry.ipv6_5t_route.iblk2.mibf = 1;
1281 entry.ipv6_5t_route.bfib1.udp = udp;
1282
1283 if (IS_IPV6_6RD(foe)) {
1284 entry.ipv6_5t_route.bfib1.rmt = 1;
1285 entry.ipv6_6rd.tunnel_sipv4 =
1286 foe->ipv6_6rd.tunnel_sipv4;
1287 entry.ipv6_6rd.tunnel_dipv4 =
1288 foe->ipv6_6rd.tunnel_dipv4;
1289 }
1290
1291 entry.ipv6_3t_route.ipv6_sip0 =
1292 foe->ipv6_3t_route.ipv6_sip0;
1293 entry.ipv6_3t_route.ipv6_sip1 =
1294 foe->ipv6_3t_route.ipv6_sip1;
1295 entry.ipv6_3t_route.ipv6_sip2 =
1296 foe->ipv6_3t_route.ipv6_sip2;
1297 entry.ipv6_3t_route.ipv6_sip3 =
1298 foe->ipv6_3t_route.ipv6_sip3;
1299
1300 entry.ipv6_3t_route.ipv6_dip0 =
1301 foe->ipv6_3t_route.ipv6_dip0;
1302 entry.ipv6_3t_route.ipv6_dip1 =
1303 foe->ipv6_3t_route.ipv6_dip1;
1304 entry.ipv6_3t_route.ipv6_dip2 =
1305 foe->ipv6_3t_route.ipv6_dip2;
1306 entry.ipv6_3t_route.ipv6_dip3 =
1307 foe->ipv6_3t_route.ipv6_dip3;
1308
developer729f0272021-06-09 17:28:38 +08001309 if (IS_IPV6_3T_ROUTE(foe)) {
1310 entry.ipv6_3t_route.prot =
1311 foe->ipv6_3t_route.prot;
1312 entry.ipv6_3t_route.hph =
1313 foe->ipv6_3t_route.hph;
1314 }
1315
developerfd40db22021-04-29 10:08:25 +08001316 if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
1317 entry.ipv6_5t_route.sport =
1318 foe->ipv6_5t_route.sport;
1319 entry.ipv6_5t_route.dport =
1320 foe->ipv6_5t_route.dport;
1321 }
developer5ffc5f12022-10-25 18:51:46 +08001322
1323#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1324 if (ct && (ct->status & IPS_SRC_NAT)) {
1325 entry.bfib1.pkt_type = IPV6_HNAPT;
1326
1327 if (IS_WAN(dev) || IS_DSA_WAN(dev)) {
1328 entry.ipv6_hnapt.eg_ipv6_dir =
1329 IPV6_SNAT;
1330 entry.ipv6_hnapt.new_ipv6_ip0 =
1331 ntohl(ip6h->saddr.s6_addr32[0]);
1332 entry.ipv6_hnapt.new_ipv6_ip1 =
1333 ntohl(ip6h->saddr.s6_addr32[1]);
1334 entry.ipv6_hnapt.new_ipv6_ip2 =
1335 ntohl(ip6h->saddr.s6_addr32[2]);
1336 entry.ipv6_hnapt.new_ipv6_ip3 =
1337 ntohl(ip6h->saddr.s6_addr32[3]);
1338 } else {
1339 entry.ipv6_hnapt.eg_ipv6_dir =
1340 IPV6_DNAT;
1341 entry.ipv6_hnapt.new_ipv6_ip0 =
1342 ntohl(ip6h->daddr.s6_addr32[0]);
1343 entry.ipv6_hnapt.new_ipv6_ip1 =
1344 ntohl(ip6h->daddr.s6_addr32[1]);
1345 entry.ipv6_hnapt.new_ipv6_ip2 =
1346 ntohl(ip6h->daddr.s6_addr32[2]);
1347 entry.ipv6_hnapt.new_ipv6_ip3 =
1348 ntohl(ip6h->daddr.s6_addr32[3]);
1349 }
1350
1351 pptr = skb_header_pointer(skb, IPV6_HDR_LEN,
1352 sizeof(_ports),
1353 &_ports);
1354 if (unlikely(!pptr))
1355 return -1;
1356
1357 entry.ipv6_hnapt.new_sport = ntohs(pptr->src);
1358 entry.ipv6_hnapt.new_dport = ntohs(pptr->dst);
1359 }
1360#endif
1361
developerfd40db22021-04-29 10:08:25 +08001362 entry.ipv6_5t_route.iblk2.dscp =
1363 (ip6h->priority << 4 |
1364 (ip6h->flow_lbl[0] >> 4));
1365 break;
1366
1367 case NEXTHDR_IPIP:
1368 if ((!mape_toggle &&
1369 entry.bfib1.pkt_type == IPV4_DSLITE) ||
1370 (mape_toggle &&
1371 entry.bfib1.pkt_type == IPV4_MAP_E)) {
1372 /* DS-Lite LAN->WAN */
1373 entry.ipv4_dslite.bfib1.udp =
1374 foe->ipv4_dslite.bfib1.udp;
1375 entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
1376 entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
1377 entry.ipv4_dslite.sport =
1378 foe->ipv4_dslite.sport;
1379 entry.ipv4_dslite.dport =
1380 foe->ipv4_dslite.dport;
1381
1382 entry.ipv4_dslite.tunnel_sipv6_0 =
1383 ntohl(ip6h->saddr.s6_addr32[0]);
1384 entry.ipv4_dslite.tunnel_sipv6_1 =
1385 ntohl(ip6h->saddr.s6_addr32[1]);
1386 entry.ipv4_dslite.tunnel_sipv6_2 =
1387 ntohl(ip6h->saddr.s6_addr32[2]);
1388 entry.ipv4_dslite.tunnel_sipv6_3 =
1389 ntohl(ip6h->saddr.s6_addr32[3]);
1390
1391 entry.ipv4_dslite.tunnel_dipv6_0 =
1392 ntohl(ip6h->daddr.s6_addr32[0]);
1393 entry.ipv4_dslite.tunnel_dipv6_1 =
1394 ntohl(ip6h->daddr.s6_addr32[1]);
1395 entry.ipv4_dslite.tunnel_dipv6_2 =
1396 ntohl(ip6h->daddr.s6_addr32[2]);
1397 entry.ipv4_dslite.tunnel_dipv6_3 =
1398 ntohl(ip6h->daddr.s6_addr32[3]);
1399
1400 ppe_fill_flow_lbl(&entry, ip6h);
1401
1402 entry.ipv4_dslite.priority = ip6h->priority;
1403 entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
1404 entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
1405 if (hnat_priv->data->per_flow_accounting)
1406 entry.ipv4_dslite.iblk2.mibf = 1;
developer25fc8c02022-05-06 16:24:02 +08001407 /* Map-E LAN->WAN record inner IPv4 header info. */
developer8c707df2022-10-24 14:09:00 +08001408#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developer25fc8c02022-05-06 16:24:02 +08001409 if (mape_toggle) {
1410 entry.ipv4_dslite.iblk2.dscp = foe->ipv4_dslite.iblk2.dscp;
developerd35bbcc2022-09-28 22:46:01 +08001411 entry.ipv4_mape.new_sip = foe->ipv4_mape.new_sip;
1412 entry.ipv4_mape.new_dip = foe->ipv4_mape.new_dip;
1413 entry.ipv4_mape.new_sport = foe->ipv4_mape.new_sport;
1414 entry.ipv4_mape.new_dport = foe->ipv4_mape.new_dport;
developer25fc8c02022-05-06 16:24:02 +08001415 }
1416#endif
developerfd40db22021-04-29 10:08:25 +08001417 } else if (mape_toggle &&
1418 entry.bfib1.pkt_type == IPV4_HNAPT) {
1419 /* MapE LAN -> WAN */
1420 mape = 1;
1421 entry.ipv4_hnapt.iblk2.dscp =
1422 foe->ipv4_hnapt.iblk2.dscp;
1423 if (hnat_priv->data->per_flow_accounting)
1424 entry.ipv4_hnapt.iblk2.mibf = 1;
1425
developerbb816412021-06-11 15:43:44 +08001426 if (IS_GMAC1_MODE)
1427 entry.ipv4_hnapt.vlan1 = 1;
1428 else
1429 entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;
developerfd40db22021-04-29 10:08:25 +08001430
1431 entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
1432 entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
1433 entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
1434 entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
1435
1436 entry.ipv4_hnapt.new_sip =
1437 foe->ipv4_hnapt.new_sip;
1438 entry.ipv4_hnapt.new_dip =
1439 foe->ipv4_hnapt.new_dip;
1440 entry.ipv4_hnapt.etype = htons(ETH_P_IP);
1441
developer34028fb2022-01-11 13:51:29 +08001442 if (IS_HQOS_MODE) {
developeraf07fad2021-11-19 17:53:42 +08001443 entry.ipv4_hnapt.iblk2.qid =
developerd35bbcc2022-09-28 22:46:01 +08001444 (hnat_priv->data->version == MTK_HNAT_V4 ||
1445 hnat_priv->data->version == MTK_HNAT_V5) ?
developeraf07fad2021-11-19 17:53:42 +08001446 skb->mark & 0x7f : skb->mark & 0xf;
developerd35bbcc2022-09-28 22:46:01 +08001447#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1448 entry.ipv4_hnapt.tport_id = 1;
1449#else
developeraf07fad2021-11-19 17:53:42 +08001450 entry.ipv4_hnapt.iblk2.fqos = 1;
developerd35bbcc2022-09-28 22:46:01 +08001451#endif
developeraf07fad2021-11-19 17:53:42 +08001452 }
developerfd40db22021-04-29 10:08:25 +08001453
1454 entry.ipv4_hnapt.bfib1.udp =
1455 foe->ipv4_hnapt.bfib1.udp;
1456
1457 entry.ipv4_hnapt.new_sport =
1458 foe->ipv4_hnapt.new_sport;
1459 entry.ipv4_hnapt.new_dport =
1460 foe->ipv4_hnapt.new_dport;
1461 mape_l2w_v6h = *ip6h;
1462 }
1463 break;
1464
1465 default:
1466 return -1;
1467 }
1468
1469 trace_printk(
1470 "[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
1471 __func__, skb->head, skb->data, ip6h, skb->len,
1472 skb->data_len);
1473 break;
1474
1475 default:
developerfd40db22021-04-29 10:08:25 +08001476 iph = ip_hdr(skb);
1477 switch (entry.bfib1.pkt_type) {
1478 case IPV6_6RD: /* 6RD LAN->WAN */
1479 entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
1480 entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
1481 entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
1482 entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
1483
1484 entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
1485 entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
1486 entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
1487 entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
1488
1489 entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
1490 entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
1491 entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
1492 entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
1493 entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
1494 entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
1495 entry.ipv6_6rd.ttl = iph->ttl;
1496 entry.ipv6_6rd.dscp = iph->tos;
1497 entry.ipv6_6rd.per_flow_6rd_id = 1;
1498 entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
1499 if (hnat_priv->data->per_flow_accounting)
1500 entry.ipv6_6rd.iblk2.mibf = 1;
1501 break;
1502
1503 default:
1504 return -1;
1505 }
1506 }
1507
1508 /* Fill Layer2 Info.*/
1509 entry = ppe_fill_L2_info(eth, entry, hw_path);
1510
1511 /* Fill Info Blk*/
1512 entry = ppe_fill_info_blk(eth, entry, hw_path);
1513
1514 if (IS_LAN(dev)) {
1515 if (IS_DSA_LAN(dev))
developeraf07fad2021-11-19 17:53:42 +08001516 port_id = hnat_dsa_fill_stag(dev, &entry, hw_path,
1517 ntohs(eth->h_proto),
1518 mape);
developerfd40db22021-04-29 10:08:25 +08001519
1520 if (IS_BOND_MODE)
1521 gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
1522 NR_GMAC2_PORT : NR_GMAC1_PORT;
1523 else
1524 gmac = NR_GMAC1_PORT;
developerd35bbcc2022-09-28 22:46:01 +08001525 } else if (IS_LAN2(dev)) {
1526 gmac = NR_GMAC3_PORT;
developerfd40db22021-04-29 10:08:25 +08001527 } else if (IS_WAN(dev)) {
1528 if (IS_DSA_WAN(dev))
developeraf07fad2021-11-19 17:53:42 +08001529 port_id = hnat_dsa_fill_stag(dev,&entry, hw_path,
1530 ntohs(eth->h_proto),
1531 mape);
developerfd40db22021-04-29 10:08:25 +08001532 if (mape_toggle && mape == 1) {
1533 gmac = NR_PDMA_PORT;
1534 /* Set act_dp = wan_dev */
1535 entry.ipv4_hnapt.act_dp = dev->ifindex;
1536 } else {
1537 gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
1538 }
developerd35bbcc2022-09-28 22:46:01 +08001539 } else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN_GRP(skb) ||
developer99506e52021-06-30 22:03:02 +08001540 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
developerfd40db22021-04-29 10:08:25 +08001541 if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
1542 entry.bfib1.vpm = 1;
1543 entry.bfib1.vlan_layer = 1;
1544
1545 if (FROM_GE_LAN(skb))
1546 entry.ipv4_hnapt.vlan1 = 1;
1547 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1548 entry.ipv4_hnapt.vlan1 = 2;
1549 }
1550
1551 trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
1552 skb_hnat_iface(skb), dev->name);
1553 /* To CPU then stolen by pre-routing hant hook of LAN/WAN
1554 * Current setting is PDMA RX.
1555 */
1556 gmac = NR_PDMA_PORT;
1557 if (IS_IPV4_GRP(foe))
1558 entry.ipv4_hnapt.act_dp = dev->ifindex;
1559 else
1560 entry.ipv6_5t_route.act_dp = dev->ifindex;
1561 } else {
1562 printk_ratelimited(KERN_WARNING
1563 "Unknown case of dp, iif=%x --> %s\n",
1564 skb_hnat_iface(skb), dev->name);
1565
1566 return 0;
1567 }
1568
developerafff5662022-06-29 10:09:56 +08001569 if (IS_HQOS_MODE || skb->mark >= MAX_PPPQ_PORT_NUM)
developeraf07fad2021-11-19 17:53:42 +08001570 qid = skb->mark & (MTK_QDMA_TX_MASK);
developerdac6ac72022-08-18 15:32:19 +08001571 else if (IS_PPPQ_MODE && (IS_DSA_1G_LAN(dev) || IS_DSA_WAN(dev) ||
1572 (FROM_WED(skb) && IS_DSA_LAN(dev))))
developeraf07fad2021-11-19 17:53:42 +08001573 qid = port_id & MTK_QDMA_TX_MASK;
1574 else
1575 qid = 0;
developerfd40db22021-04-29 10:08:25 +08001576
1577 if (IS_IPV4_GRP(foe)) {
1578 entry.ipv4_hnapt.iblk2.dp = gmac;
1579 entry.ipv4_hnapt.iblk2.port_mg =
1580 (hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
developer24948202021-11-24 17:38:27 +08001581
developeraf07fad2021-11-19 17:53:42 +08001582 if (qos_toggle) {
developerd35bbcc2022-09-28 22:46:01 +08001583 if (hnat_priv->data->version == MTK_HNAT_V4 ||
1584 hnat_priv->data->version == MTK_HNAT_V5) {
developeraf07fad2021-11-19 17:53:42 +08001585 entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
1586 } else {
1587 /* qid[5:0]= port_mg[1:0]+ qid[3:0] */
1588 entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
1589 if (hnat_priv->data->version != MTK_HNAT_V1)
1590 entry.ipv4_hnapt.iblk2.port_mg |=
1591 ((qid >> 4) & 0x3);
developerfd40db22021-04-29 10:08:25 +08001592
developerd35bbcc2022-09-28 22:46:01 +08001593 if (((IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
developeraf07fad2021-11-19 17:53:42 +08001594 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
1595 ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
1596 (!whnat)) {
1597 entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
1598 entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
1599 entry.bfib1.vlan_layer = 1;
1600 }
developerfd40db22021-04-29 10:08:25 +08001601 }
developerfd40db22021-04-29 10:08:25 +08001602
developer34028fb2022-01-11 13:51:29 +08001603 if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT ||
1604 (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
developeraf07fad2021-11-19 17:53:42 +08001605 entry.ipv4_hnapt.iblk2.fqos = 0;
1606 else
developerd35bbcc2022-09-28 22:46:01 +08001607#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1608 entry.ipv4_hnapt.tport_id = 1;
1609#else
developer399ec072022-06-24 16:07:41 +08001610 entry.ipv4_hnapt.iblk2.fqos =
1611 (!IS_PPPQ_MODE || (IS_PPPQ_MODE &&
developerdac6ac72022-08-18 15:32:19 +08001612 (IS_DSA_1G_LAN(dev) || IS_DSA_WAN(dev) ||
1613 (FROM_WED(skb) && IS_DSA_LAN(dev)))));
developerd35bbcc2022-09-28 22:46:01 +08001614#endif
developeraf07fad2021-11-19 17:53:42 +08001615 } else {
developerfd40db22021-04-29 10:08:25 +08001616 entry.ipv4_hnapt.iblk2.fqos = 0;
developeraf07fad2021-11-19 17:53:42 +08001617 }
developerfd40db22021-04-29 10:08:25 +08001618 } else {
1619 entry.ipv6_5t_route.iblk2.dp = gmac;
1620 entry.ipv6_5t_route.iblk2.port_mg =
1621 (hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
developer24948202021-11-24 17:38:27 +08001622
developeraf07fad2021-11-19 17:53:42 +08001623 if (qos_toggle) {
developerd35bbcc2022-09-28 22:46:01 +08001624 if (hnat_priv->data->version == MTK_HNAT_V4 ||
1625 hnat_priv->data->version == MTK_HNAT_V5) {
developeraf07fad2021-11-19 17:53:42 +08001626 entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
1627 } else {
1628 /* qid[5:0]= port_mg[1:0]+ qid[3:0] */
1629 entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
1630 if (hnat_priv->data->version != MTK_HNAT_V1)
1631 entry.ipv6_5t_route.iblk2.port_mg |=
1632 ((qid >> 4) & 0x3);
developerfd40db22021-04-29 10:08:25 +08001633
developerd35bbcc2022-09-28 22:46:01 +08001634 if (IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
developeraf07fad2021-11-19 17:53:42 +08001635 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
1636 (!whnat)) {
1637 entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
1638 entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
1639 entry.bfib1.vlan_layer = 1;
1640 }
developerfd40db22021-04-29 10:08:25 +08001641 }
developerfd40db22021-04-29 10:08:25 +08001642
developer34028fb2022-01-11 13:51:29 +08001643 if (FROM_EXT(skb) ||
1644 (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
developeraf07fad2021-11-19 17:53:42 +08001645 entry.ipv6_5t_route.iblk2.fqos = 0;
1646 else
developerd35bbcc2022-09-28 22:46:01 +08001647#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1648 entry.ipv6_5t_route.tport_id = 1;
1649#else
developer399ec072022-06-24 16:07:41 +08001650 entry.ipv6_5t_route.iblk2.fqos =
1651 (!IS_PPPQ_MODE || (IS_PPPQ_MODE &&
developerdac6ac72022-08-18 15:32:19 +08001652 (IS_DSA_1G_LAN(dev) || IS_DSA_WAN(dev) ||
1653 (FROM_WED(skb) && IS_DSA_LAN(dev)))));
developerd35bbcc2022-09-28 22:46:01 +08001654#endif
developeraf07fad2021-11-19 17:53:42 +08001655 } else {
developerfd40db22021-04-29 10:08:25 +08001656 entry.ipv6_5t_route.iblk2.fqos = 0;
developeraf07fad2021-11-19 17:53:42 +08001657 }
developerfd40db22021-04-29 10:08:25 +08001658 }
1659
developer60e60962021-06-15 21:05:07 +08001660	/* The INFO2.port_mg and 2nd VLAN ID fields of the PPE entry are redefined
1661	 * by the Wi-Fi whnat engine. These fields and INFO2.dp will be updated and
1662 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
1663 */
developer7b36dca2022-05-19 18:29:10 +08001664 if (!whnat) {
1665 entry.bfib1.ttl = 1;
developer60e60962021-06-15 21:05:07 +08001666 entry.bfib1.state = BIND;
developer7b36dca2022-05-19 18:29:10 +08001667 }
developer60e60962021-06-15 21:05:07 +08001668
developerbc552cc2022-03-15 16:19:27 +08001669 wmb();
developerfd40db22021-04-29 10:08:25 +08001670 memcpy(foe, &entry, sizeof(entry));
1671	/* reset statistics for this entry */
1672 if (hnat_priv->data->per_flow_accounting)
developer471f6562021-05-10 20:48:34 +08001673 memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
1674 0, sizeof(struct mib_entry));
developerfd40db22021-04-29 10:08:25 +08001675
developerfdfe1572021-09-13 16:56:33 +08001676 skb_hnat_filled(skb) = HNAT_INFO_FILLED;
developerfd40db22021-04-29 10:08:25 +08001677
1678 return 0;
1679}
1680
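/* TX-side binding hook (expected to be invoked on the Wi-Fi/WED transmit
 * path with gmac_no set to one of the WDMA ports): it completes the PPE
 * entry that skb_to_hnat_info() pre-filled, updating the source MAC, VLAN,
 * Wi-Fi info and QoS fields, and then switches the entry to the BIND state.
 */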
1681int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
1682{
1683 struct foe_entry *entry;
1684 struct ethhdr *eth;
developerbc552cc2022-03-15 16:19:27 +08001685 struct hnat_bind_info_blk bfib1_tx;
developerfd40db22021-04-29 10:08:25 +08001686
developerfdfe1572021-09-13 16:56:33 +08001687 if (skb_hnat_alg(skb) || !is_hnat_info_filled(skb) ||
1688 !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
developerfd40db22021-04-29 10:08:25 +08001689 return NF_ACCEPT;
1690
1691 trace_printk(
1692		"[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x bssid=%x wcid=%x rxid=%x\n",
1693 __func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
1694 skb_hnat_wdma_id(skb), skb_hnat_bss_id(skb),
1695 skb_hnat_wc_id(skb), skb_hnat_rx_id(skb));
1696
developer99506e52021-06-30 22:03:02 +08001697 if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) &&
1698 (gmac_no != NR_WHNAT_WDMA_PORT))
1699 return NF_ACCEPT;
1700
developerfd40db22021-04-29 10:08:25 +08001701 if (!skb_hnat_is_hashed(skb))
1702 return NF_ACCEPT;
1703
developer955a6f62021-07-26 10:54:39 +08001704 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
1705 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
1706 return NF_ACCEPT;
1707
developer471f6562021-05-10 20:48:34 +08001708 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08001709 if (entry_hnat_is_bound(entry))
1710 return NF_ACCEPT;
1711
1712 if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
1713 return NF_ACCEPT;
1714
1715 eth = eth_hdr(skb);
developerbc552cc2022-03-15 16:19:27 +08001716 memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
developer8116b0a2021-08-23 18:07:20 +08001717
1718	/* Do not bind multicast flows if PPE multicast support is not enabled. */
developerfdfe1572021-09-13 16:56:33 +08001719 if (!hnat_priv->data->mcast) {
1720 if (is_multicast_ether_addr(eth->h_dest))
1721 return NF_ACCEPT;
1722
1723 if (IS_IPV4_GRP(entry))
1724 entry->ipv4_hnapt.iblk2.mcast = 0;
1725 else
1726 entry->ipv6_5t_route.iblk2.mcast = 0;
1727 }
developerfd40db22021-04-29 10:08:25 +08001728
1729 /* Some mt_wifi virtual interfaces, such as apcli,
1730	 * will change the smac for special purposes.
1731 */
developer5ffc5f12022-10-25 18:51:46 +08001732 switch ((int)bfib1_tx.pkt_type) {
developerfd40db22021-04-29 10:08:25 +08001733 case IPV4_HNAPT:
1734 case IPV4_HNAT:
1735 entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
1736 entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1737 break;
1738 case IPV4_DSLITE:
1739 case IPV4_MAP_E:
1740 case IPV6_6RD:
1741 case IPV6_5T_ROUTE:
1742 case IPV6_3T_ROUTE:
developer5ffc5f12022-10-25 18:51:46 +08001743 case IPV6_HNAPT:
1744 case IPV6_HNAT:
developerfd40db22021-04-29 10:08:25 +08001745 entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
1746 entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1747 break;
1748 }
1749
developer0ff76882021-10-26 10:54:13 +08001750 if (skb->vlan_tci) {
developerbc552cc2022-03-15 16:19:27 +08001751 bfib1_tx.vlan_layer = 1;
1752 bfib1_tx.vpm = 1;
developer0ff76882021-10-26 10:54:13 +08001753 if (IS_IPV4_GRP(entry)) {
1754 entry->ipv4_hnapt.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001755 entry->ipv4_hnapt.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001756 } else if (IS_IPV6_GRP(entry)) {
1757 entry->ipv6_5t_route.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001758 entry->ipv6_5t_route.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001759 }
1760 } else {
developerbc552cc2022-03-15 16:19:27 +08001761 bfib1_tx.vpm = 0;
1762 bfib1_tx.vlan_layer = 0;
developer0ff76882021-10-26 10:54:13 +08001763 }
developer60e60962021-06-15 21:05:07 +08001764
developerfd40db22021-04-29 10:08:25 +08001765 /* MT7622 wifi hw_nat not support QoS */
1766 if (IS_IPV4_GRP(entry)) {
1767 entry->ipv4_hnapt.iblk2.fqos = 0;
developere567ad32021-05-25 17:16:17 +08001768 if ((hnat_priv->data->version == MTK_HNAT_V2 &&
1769 gmac_no == NR_WHNAT_WDMA_PORT) ||
developerd35bbcc2022-09-28 22:46:01 +08001770 ((hnat_priv->data->version == MTK_HNAT_V4 ||
1771 hnat_priv->data->version == MTK_HNAT_V5) &&
developere567ad32021-05-25 17:16:17 +08001772 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001773 entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1774 entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001775#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1776 entry->ipv4_hnapt.tport_id = (IS_HQOS_MODE) ? 1 : 0;
1777 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1778 entry->ipv4_hnapt.iblk2.winfoi = 1;
1779 entry->ipv4_hnapt.winfo_pao.usr_info =
1780 skb_hnat_usr_info(skb);
1781 entry->ipv4_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1782 entry->ipv4_hnapt.winfo_pao.is_fixedrate =
1783 skb_hnat_is_fixedrate(skb);
1784 entry->ipv4_hnapt.winfo_pao.is_prior =
1785 skb_hnat_is_prior(skb);
1786 entry->ipv4_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1787 entry->ipv4_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1788 entry->ipv4_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
1789#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
developerfd40db22021-04-29 10:08:25 +08001790 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1791 entry->ipv4_hnapt.iblk2.winfoi = 1;
1792#else
1793 entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
1794 entry->ipv4_hnapt.iblk2w.winfoi = 1;
1795 entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1796#endif
1797 } else {
1798 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001799 bfib1_tx.vpm = 1;
1800 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001801
developerd35bbcc2022-09-28 22:46:01 +08001802 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001803 entry->ipv4_hnapt.vlan1 = 1;
1804 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1805 entry->ipv4_hnapt.vlan1 = 2;
1806 }
1807
developer34028fb2022-01-11 13:51:29 +08001808 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001809 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001810 bfib1_tx.vpm = 0;
1811 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001812 entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
1813 entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
1814 entry->ipv4_hnapt.iblk2.fqos = 1;
1815 }
developerfd40db22021-04-29 10:08:25 +08001816 }
1817 entry->ipv4_hnapt.iblk2.dp = gmac_no;
developer5ffc5f12022-10-25 18:51:46 +08001818#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1819 } else if (IS_IPV6_HNAPT(entry) || IS_IPV6_HNAT(entry)) {
1820 entry->ipv6_hnapt.iblk2.dp = gmac_no;
1821 entry->ipv6_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1822 entry->ipv6_hnapt.iblk2.winfoi = 1;
1823
1824 entry->ipv6_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1825 entry->ipv6_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
1826 entry->ipv6_hnapt.winfo_pao.usr_info = skb_hnat_usr_info(skb);
1827 entry->ipv6_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1828 entry->ipv6_hnapt.winfo_pao.is_fixedrate =
1829 skb_hnat_is_fixedrate(skb);
1830 entry->ipv6_hnapt.winfo_pao.is_prior = skb_hnat_is_prior(skb);
1831 entry->ipv6_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1832 entry->ipv6_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1833 entry->ipv6_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
1834 entry->ipv6_hnapt.tport_id = (IS_HQOS_MODE) ? 1 : 0;
1835#endif
developerfd40db22021-04-29 10:08:25 +08001836 } else {
1837 entry->ipv6_5t_route.iblk2.fqos = 0;
developere567ad32021-05-25 17:16:17 +08001838 if ((hnat_priv->data->version == MTK_HNAT_V2 &&
1839 gmac_no == NR_WHNAT_WDMA_PORT) ||
developerd35bbcc2022-09-28 22:46:01 +08001840 ((hnat_priv->data->version == MTK_HNAT_V4 ||
1841 hnat_priv->data->version == MTK_HNAT_V5) &&
developere567ad32021-05-25 17:16:17 +08001842 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001843 entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
1844 entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001845#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1846 entry->ipv6_5t_route.tport_id = (IS_HQOS_MODE) ? 1 : 0;
developerfd40db22021-04-29 10:08:25 +08001847 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1848 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerd35bbcc2022-09-28 22:46:01 +08001849 entry->ipv6_5t_route.winfo_pao.usr_info =
1850 skb_hnat_usr_info(skb);
1851 entry->ipv6_5t_route.winfo_pao.tid =
1852 skb_hnat_tid(skb);
1853 entry->ipv6_5t_route.winfo_pao.is_fixedrate =
1854 skb_hnat_is_fixedrate(skb);
1855 entry->ipv6_5t_route.winfo_pao.is_prior =
1856 skb_hnat_is_prior(skb);
1857 entry->ipv6_5t_route.winfo_pao.is_sp =
1858 skb_hnat_is_sp(skb);
1859 entry->ipv6_5t_route.winfo_pao.hf =
1860 skb_hnat_hf(skb);
1861 entry->ipv6_5t_route.winfo_pao.amsdu =
1862 skb_hnat_amsdu(skb);
1863#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
1864 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1865 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerfd40db22021-04-29 10:08:25 +08001866#else
1867 entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
1868 entry->ipv6_5t_route.iblk2w.winfoi = 1;
1869 entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1870#endif
1871 } else {
1872 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001873 bfib1_tx.vpm = 1;
1874 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001875
developerd35bbcc2022-09-28 22:46:01 +08001876 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001877 entry->ipv6_5t_route.vlan1 = 1;
1878 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1879 entry->ipv6_5t_route.vlan1 = 2;
1880 }
1881
developer34028fb2022-01-11 13:51:29 +08001882 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001883 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001884 bfib1_tx.vpm = 0;
1885 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001886 entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
1887 entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
1888 entry->ipv6_5t_route.iblk2.fqos = 1;
1889 }
developerfd40db22021-04-29 10:08:25 +08001890 }
1891 entry->ipv6_5t_route.iblk2.dp = gmac_no;
1892 }
1893
developer7b36dca2022-05-19 18:29:10 +08001894 bfib1_tx.ttl = 1;
developerbc552cc2022-03-15 16:19:27 +08001895 bfib1_tx.state = BIND;
1896 wmb();
1897 memcpy(&entry->bfib1, &bfib1_tx, sizeof(bfib1_tx));
developerfd40db22021-04-29 10:08:25 +08001898
1899 return NF_ACCEPT;
1900}
1901
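/* RX-side hook for packets received through WED: re-initializes the HNAT
 * tag kept in the skb headroom and maps the ingress WED ring
 * (FOE_MAGIC_WED0/1) to the corresponding WDMA source port.
 */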
1902int mtk_sw_nat_hook_rx(struct sk_buff *skb)
1903{
developer99506e52021-06-30 22:03:02 +08001904 if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
1905 skb_hnat_magic_tag(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001906 return NF_ACCEPT;
developer99506e52021-06-30 22:03:02 +08001907 }
developerfd40db22021-04-29 10:08:25 +08001908
1909 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001910 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001911 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1912
1913 if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
1914 skb_hnat_sport(skb) = NR_WDMA0_PORT;
1915 else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
1916 skb_hnat_sport(skb) = NR_WDMA1_PORT;
1917
1918 return NF_ACCEPT;
1919}
1920
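/* Register a Wi-Fi (or other external) net_device into the wifi_hook_if
 * table; if the name is not already known as an extif, a new extdev_entry
 * is allocated and a reference to the device is held.
 */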
1921void mtk_ppe_dev_register_hook(struct net_device *dev)
1922{
1923 int i, number = 0;
1924 struct extdev_entry *ext_entry;
1925
developerfd40db22021-04-29 10:08:25 +08001926 for (i = 1; i < MAX_IF_NUM; i++) {
1927 if (hnat_priv->wifi_hook_if[i] == dev) {
1928 pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
1929 __func__, dev->name, i);
1930 return;
1931 }
1932 if (!hnat_priv->wifi_hook_if[i]) {
1933 if (find_extif_from_devname(dev->name)) {
1934 extif_set_dev(dev);
1935 goto add_wifi_hook_if;
1936 }
1937
1938 number = get_ext_device_number();
1939 if (number >= MAX_EXT_DEVS) {
1940 pr_info("%s : extdev array is full. %s is not registered\n",
1941 __func__, dev->name);
1942 return;
1943 }
1944
1945 ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
1946 if (!ext_entry)
1947 return;
1948
developer4c32b7a2021-11-13 16:46:43 +08001949 strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
developerfd40db22021-04-29 10:08:25 +08001950 dev_hold(dev);
1951 ext_entry->dev = dev;
1952 ext_if_add(ext_entry);
1953
1954add_wifi_hook_if:
1955 dev_hold(dev);
1956 hnat_priv->wifi_hook_if[i] = dev;
1957
1958 break;
1959 }
1960 }
1961	pr_info("%s : interface %s registered (%d)\n", __func__, dev->name, i);
1962}
1963
1964void mtk_ppe_dev_unregister_hook(struct net_device *dev)
1965{
1966 int i;
1967
1968 for (i = 1; i < MAX_IF_NUM; i++) {
1969 if (hnat_priv->wifi_hook_if[i] == dev) {
1970 hnat_priv->wifi_hook_if[i] = NULL;
1971 dev_put(dev);
1972
1973 break;
1974 }
1975 }
1976
1977 extif_put_dev(dev);
1978	pr_info("%s : interface %s set to NULL (%d)\n", __func__, dev->name, i);
1979}
1980
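/* Return 1 if the flow carried by this skb may be accelerated: xfrm
 * (IPsec) flows and conntrack entries with an attached helper are excluded.
 */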
1981static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
1982{
1983 struct dst_entry *dst;
1984 struct nf_conn *ct;
1985 enum ip_conntrack_info ctinfo;
1986 const struct nf_conn_help *help;
1987
1988	/* Do not accelerate the 1st round of an xfrm flow; the 2nd round
1989	 * comes from local_out, which is also filtered in the sanity check.
1990 */
1991 dst = skb_dst(skb);
1992 if (dst && dst_xfrm(dst))
1993 return 0;
1994
1995 ct = nf_ct_get(skb, &ctinfo);
1996 if (!ct)
1997 return 1;
1998
1999 /* rcu_read_lock()ed by nf_hook_slow */
2000 help = nfct_help(ct);
2001 if (help && rcu_dereference(help->helper))
2002 return 0;
2003
2004 return 1;
2005}
2006
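/* Compare the DSCP recorded in the bound PPE entry with the DSCP/TOS of
 * the current packet; on a mismatch the entry is cleared and
 * hnat_cache_ebl() is called so the flow is re-learned with the new value.
 */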
developer6f4a0c72021-10-19 10:04:22 +08002007static void mtk_hnat_dscp_update(struct sk_buff *skb, struct foe_entry *entry)
2008{
2009 struct iphdr *iph;
2010 struct ethhdr *eth;
2011 struct ipv6hdr *ip6h;
2012 bool flag = false;
2013
2014 eth = eth_hdr(skb);
2015 switch (ntohs(eth->h_proto)) {
2016 case ETH_P_IP:
2017 iph = ip_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002018 if (IS_IPV4_GRP(entry) && entry->ipv4_hnapt.iblk2.dscp != iph->tos)
developer6f4a0c72021-10-19 10:04:22 +08002019 flag = true;
2020 break;
2021 case ETH_P_IPV6:
2022 ip6h = ipv6_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002023 if ((IS_IPV6_3T_ROUTE(entry) || IS_IPV6_5T_ROUTE(entry)) &&
2024 (entry->ipv6_5t_route.iblk2.dscp !=
2025 (ip6h->priority << 4 | (ip6h->flow_lbl[0] >> 4))))
developer6f4a0c72021-10-19 10:04:22 +08002026 flag = true;
2027 break;
2028 default:
2029 return;
2030 }
2031
2032 if (flag) {
developer1080dd82022-03-07 19:31:04 +08002033 if (debug_level >= 2)
2034 pr_info("Delete entry idx=%d.\n", skb_hnat_entry(skb));
developer6f4a0c72021-10-19 10:04:22 +08002035 memset(entry, 0, sizeof(struct foe_entry));
2036 hnat_cache_ebl(1);
2037 }
2038}
2039
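/* Fold the per-flow hardware counters (delta read via hnat_get_count())
 * back into the nf_conntrack accounting of the corresponding conntrack
 * entry.
 */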
developer30a47682021-11-02 17:06:14 +08002040static void mtk_hnat_nf_update(struct sk_buff *skb)
2041{
2042 struct nf_conn *ct;
2043 struct nf_conn_acct *acct;
2044 struct nf_conn_counter *counter;
2045 enum ip_conntrack_info ctinfo;
2046 struct hnat_accounting diff;
2047
2048 ct = nf_ct_get(skb, &ctinfo);
2049 if (ct) {
2050 if (!hnat_get_count(hnat_priv, skb_hnat_ppe(skb), skb_hnat_entry(skb), &diff))
2051 return;
2052
2053 acct = nf_conn_acct_find(ct);
2054 if (acct) {
2055 counter = acct->counter;
2056 atomic64_add(diff.packets, &counter[CTINFO2DIR(ctinfo)].packets);
2057 atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes);
2058 }
2059 }
2060}
2061
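/* Common post-routing handler: on HIT_UNBIND_RATE_REACH the PPE entry is
 * filled through skb_to_hnat_info(); on keepalive the conntrack counters,
 * DSCP and multicast timestamp are refreshed. A return value of -1 tells
 * the caller to drop the packet because the PPE already handles the flow.
 */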
developerfd40db22021-04-29 10:08:25 +08002062static unsigned int mtk_hnat_nf_post_routing(
2063 struct sk_buff *skb, const struct net_device *out,
2064 unsigned int (*fn)(struct sk_buff *, const struct net_device *,
2065 struct flow_offload_hw_path *),
2066 const char *func)
2067{
2068 struct foe_entry *entry;
2069 struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
developere5763512021-05-21 01:04:58 +08002070 .virt_dev = (struct net_device*)out };
developerfd40db22021-04-29 10:08:25 +08002071 const struct net_device *arp_dev = out;
2072
2073 if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
2074 !IS_SPACE_AVAILABLE_HEAD(skb)))
2075 return 0;
2076
2077 if (unlikely(!skb_hnat_is_hashed(skb)))
2078 return 0;
2079
2080 if (out->netdev_ops->ndo_flow_offload_check) {
developere5763512021-05-21 01:04:58 +08002081 out->netdev_ops->ndo_flow_offload_check(&hw_path);
developerfd40db22021-04-29 10:08:25 +08002082 out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
2083 }
2084
developerd35bbcc2022-09-28 22:46:01 +08002085 if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
developerfd40db22021-04-29 10:08:25 +08002086 return 0;
2087
2088 trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
2089 skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
2090
developer471f6562021-05-10 20:48:34 +08002091 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002092
2093 switch (skb_hnat_reason(skb)) {
2094 case HIT_UNBIND_RATE_REACH:
2095 if (entry_hnat_is_bound(entry))
2096 break;
2097
2098 if (fn && !mtk_hnat_accel_type(skb))
2099 break;
2100
2101 if (fn && fn(skb, arp_dev, &hw_path))
2102 break;
2103
2104 skb_to_hnat_info(skb, out, entry, &hw_path);
2105 break;
2106 case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
developer30a47682021-11-02 17:06:14 +08002107		/* update HNAT counters into nf_conntrack on keepalive */
2108 if (hnat_priv->data->per_flow_accounting && hnat_priv->nf_stat_en)
2109 mtk_hnat_nf_update(skb);
2110
developerfd40db22021-04-29 10:08:25 +08002111 if (fn && !mtk_hnat_accel_type(skb))
2112 break;
2113
developer6f4a0c72021-10-19 10:04:22 +08002114 /* update dscp for qos */
2115 mtk_hnat_dscp_update(skb, entry);
2116
developerfd40db22021-04-29 10:08:25 +08002117		/* update mcast timestamp */
2118 if (hnat_priv->data->version == MTK_HNAT_V3 &&
2119 hnat_priv->data->mcast && entry->bfib1.sta == 1)
2120 entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
2121
2122 if (entry_hnat_is_bound(entry)) {
2123 memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
2124
2125 return -1;
2126 }
2127 break;
2128 case HIT_BIND_MULTICAST_TO_CPU:
2129 case HIT_BIND_MULTICAST_TO_GMAC_CPU:
2130		/* do not forward to GDMA again if the PPE has already done it */
developerd35bbcc2022-09-28 22:46:01 +08002131 if (IS_LAN_GRP(out) || IS_WAN(out))
developerfd40db22021-04-29 10:08:25 +08002132 return -1;
2133 break;
2134 }
2135
2136 return 0;
2137}
2138
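/* IPv6 LOCAL_OUT hook for DS-Lite / MAP-E upstream traffic: for locally
 * encapsulated IPv4-in-IPv6 packets the inner IPv4 addresses and L4 ports
 * are recorded in the PPE entry (MAP-E), or the entry is simply marked as
 * IPV4_DSLITE when mape_toggle is off.
 */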
2139static unsigned int
2140mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
2141 const struct nf_hook_state *state)
2142{
2143 struct foe_entry *entry;
2144 struct ipv6hdr *ip6h;
2145 struct iphdr _iphdr;
2146 const struct iphdr *iph;
2147 struct tcpudphdr _ports;
2148 const struct tcpudphdr *pptr;
2149 int udp = 0;
2150
2151 if (unlikely(!skb_hnat_is_hashed(skb)))
2152 return NF_ACCEPT;
2153
developer471f6562021-05-10 20:48:34 +08002154 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002155 if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
2156 ip6h = ipv6_hdr(skb);
2157 if (ip6h->nexthdr == NEXTHDR_IPIP) {
2158 /* Map-E LAN->WAN: need to record orig info before fn. */
2159 if (mape_toggle) {
2160 iph = skb_header_pointer(skb, IPV6_HDR_LEN,
2161 sizeof(_iphdr), &_iphdr);
developer4c32b7a2021-11-13 16:46:43 +08002162 if (unlikely(!iph))
2163 return NF_ACCEPT;
2164
developerfd40db22021-04-29 10:08:25 +08002165 switch (iph->protocol) {
2166 case IPPROTO_UDP:
2167 udp = 1;
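					/* fall through */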
2168 case IPPROTO_TCP:
2169 break;
2170
2171 default:
2172 return NF_ACCEPT;
2173 }
2174
2175 pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
2176 sizeof(_ports), &_ports);
developer4c32b7a2021-11-13 16:46:43 +08002177 if (unlikely(!pptr))
2178 return NF_ACCEPT;
2179
developerfd40db22021-04-29 10:08:25 +08002180 entry->bfib1.udp = udp;
2181
developer25fc8c02022-05-06 16:24:02 +08002182				/* Map-E LAN->WAN: record the inner IPv4 header info. */
developerd35bbcc2022-09-28 22:46:01 +08002183#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08002184 entry->bfib1.pkt_type = IPV4_MAP_E;
2185 entry->ipv4_dslite.iblk2.dscp = iph->tos;
developerd35bbcc2022-09-28 22:46:01 +08002186 entry->ipv4_mape.new_sip = ntohl(iph->saddr);
2187 entry->ipv4_mape.new_dip = ntohl(iph->daddr);
2188 entry->ipv4_mape.new_sport = ntohs(pptr->src);
2189 entry->ipv4_mape.new_dport = ntohs(pptr->dst);
developerfd40db22021-04-29 10:08:25 +08002190#else
2191 entry->ipv4_hnapt.iblk2.dscp = iph->tos;
2192 entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
2193 entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
2194 entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
2195 entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
2196#endif
2197 } else {
2198 entry->bfib1.pkt_type = IPV4_DSLITE;
2199 }
2200 }
2201 }
2202 return NF_ACCEPT;
2203}
2204
2205static unsigned int
2206mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
2207 const struct nf_hook_state *state)
2208{
2209 post_routing_print(skb, state->in, state->out, __func__);
2210
2211 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
2212 __func__))
2213 return NF_ACCEPT;
2214
2215 trace_printk(
2216 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2217 __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
2218 skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
2219 skb_hnat_alg(skb));
2220
2221 return NF_DROP;
2222}
2223
2224static unsigned int
2225mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
2226 const struct nf_hook_state *state)
2227{
2228 post_routing_print(skb, state->in, state->out, __func__);
2229
2230 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
2231 __func__))
2232 return NF_ACCEPT;
2233
2234 trace_printk(
2235 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2236 __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
2237 skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
2238 skb_hnat_alg(skb));
2239
2240 return NF_DROP;
2241}
2242
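/* Bridge PRE_ROUTING handler for the HQoS loop-back ("pong") path: the
 * entry index and reason are recovered from the HQoS magic VLAN tag, then
 * the ext->GE learning and GE->ext fast paths are applied.
 */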
2243static unsigned int
2244mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
2245 const struct nf_hook_state *state)
2246{
developerfd40db22021-04-29 10:08:25 +08002247 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2248
developer34028fb2022-01-11 13:51:29 +08002249 if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
developerfd40db22021-04-29 10:08:25 +08002250 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2251 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2252 }
developerfd40db22021-04-29 10:08:25 +08002253
2254 if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
2255 clr_from_extge(skb);
2256
2257	/* packets from external devices -> xxx, step 2, learning stage */
developeraf07fad2021-11-19 17:53:42 +08002258 if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
2259 (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
developerfd40db22021-04-29 10:08:25 +08002260 if (!do_hnat_ext_to_ge2(skb, __func__))
2261 return NF_STOLEN;
2262 goto drop;
2263 }
2264
2265	/* packets from GE -> external device */
2266 if (do_ge2ext_fast(state->in, skb)) {
2267 if (!do_hnat_ge_to_ext(skb, __func__))
2268 return NF_STOLEN;
2269 goto drop;
2270 }
2271
2272 return NF_ACCEPT;
2273drop:
2274 printk_ratelimited(KERN_WARNING
2275 "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2276 __func__, state->in->name, skb_hnat_iface(skb),
2277 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2278 skb_hnat_sport(skb), skb_hnat_reason(skb),
2279 skb_hnat_alg(skb));
2280
2281 return NF_DROP;
2282}
2283
2284static unsigned int
2285mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
2286 const struct nf_hook_state *state)
2287{
2288 post_routing_print(skb, state->in, state->out, __func__);
2289
2290 if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__))
2291 return NF_ACCEPT;
2292
2293 trace_printk(
2294 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2295 __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
2296 skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
2297 skb_hnat_alg(skb));
2298
2299 return NF_DROP;
2300}
2301
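/* IPv4 LOCAL_OUT hook: makes sure there is enough headroom for the FOE
 * tag and marks locally generated flows with the ALG bit so they are not
 * bound; 6RD-encapsulated packets (protocol IPPROTO_IPV6) are instead
 * tagged as IPV6_6RD with the ALG bit cleared.
 */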
2302static unsigned int
2303mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
2304 const struct nf_hook_state *state)
2305{
2306 struct sk_buff *new_skb;
2307 struct foe_entry *entry;
2308 struct iphdr *iph;
2309
2310 if (!skb_hnat_is_hashed(skb))
2311 return NF_ACCEPT;
2312
developer471f6562021-05-10 20:48:34 +08002313 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002314
2315 if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
2316 new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
2317 if (!new_skb) {
2318 dev_info(hnat_priv->dev, "%s:drop\n", __func__);
2319 return NF_DROP;
2320 }
2321 dev_kfree_skb(skb);
2322 skb = new_skb;
2323 }
2324
2325	/* Prevent locally generated flows from being bound. */
2326 iph = ip_hdr(skb);
2327 if (iph->protocol == IPPROTO_IPV6) {
2328 entry->udib1.pkt_type = IPV6_6RD;
2329 hnat_set_head_frags(state, skb, 0, hnat_set_alg);
2330 } else {
2331 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2332 }
2333
2334 return NF_ACCEPT;
2335}
2336
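/* Bridge FORWARD handler installed by whnat_adjust_nf_hooks(): on
 * MTK_HNAT_V2 it sets the ALG bit for ext->ext bridged traffic so such
 * flows are not offloaded.
 */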
2337static unsigned int mtk_hnat_br_nf_forward(void *priv,
2338 struct sk_buff *skb,
2339 const struct nf_hook_state *state)
2340{
developer99506e52021-06-30 22:03:02 +08002341 if ((hnat_priv->data->version == MTK_HNAT_V2) &&
2342 unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
developerfd40db22021-04-29 10:08:25 +08002343 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2344
2345 return NF_ACCEPT;
2346}
2347
2348static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
2349 {
2350 .hook = mtk_hnat_ipv4_nf_pre_routing,
2351 .pf = NFPROTO_IPV4,
2352 .hooknum = NF_INET_PRE_ROUTING,
2353 .priority = NF_IP_PRI_FIRST + 1,
2354 },
2355 {
2356 .hook = mtk_hnat_ipv6_nf_pre_routing,
2357 .pf = NFPROTO_IPV6,
2358 .hooknum = NF_INET_PRE_ROUTING,
2359 .priority = NF_IP_PRI_FIRST + 1,
2360 },
2361 {
2362 .hook = mtk_hnat_ipv6_nf_post_routing,
2363 .pf = NFPROTO_IPV6,
2364 .hooknum = NF_INET_POST_ROUTING,
2365 .priority = NF_IP_PRI_LAST,
2366 },
2367 {
2368 .hook = mtk_hnat_ipv6_nf_local_out,
2369 .pf = NFPROTO_IPV6,
2370 .hooknum = NF_INET_LOCAL_OUT,
2371 .priority = NF_IP_PRI_LAST,
2372 },
2373 {
2374 .hook = mtk_hnat_ipv4_nf_post_routing,
2375 .pf = NFPROTO_IPV4,
2376 .hooknum = NF_INET_POST_ROUTING,
2377 .priority = NF_IP_PRI_LAST,
2378 },
2379 {
2380 .hook = mtk_hnat_ipv4_nf_local_out,
2381 .pf = NFPROTO_IPV4,
2382 .hooknum = NF_INET_LOCAL_OUT,
2383 .priority = NF_IP_PRI_LAST,
2384 },
2385 {
2386 .hook = mtk_hnat_br_nf_local_in,
2387 .pf = NFPROTO_BRIDGE,
2388 .hooknum = NF_BR_LOCAL_IN,
2389 .priority = NF_BR_PRI_FIRST,
2390 },
2391 {
2392 .hook = mtk_hnat_br_nf_local_out,
2393 .pf = NFPROTO_BRIDGE,
2394 .hooknum = NF_BR_LOCAL_OUT,
2395 .priority = NF_BR_PRI_LAST - 1,
2396 },
2397 {
2398 .hook = mtk_pong_hqos_handler,
2399 .pf = NFPROTO_BRIDGE,
2400 .hooknum = NF_BR_PRE_ROUTING,
developer2b85f652021-08-19 16:09:50 +08002401 .priority = NF_BR_PRI_FIRST + 1,
developerfd40db22021-04-29 10:08:25 +08002402 },
2403};
2404
2405int hnat_register_nf_hooks(void)
2406{
2407 return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2408}
2409
2410void hnat_unregister_nf_hooks(void)
2411{
2412 nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2413}
2414
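/* Adjust the bridge hooks for the whnat (Wi-Fi warp) case:
 * mtk_hnat_br_nf_local_in moves to BR_PRE_ROUTING, mtk_hnat_br_nf_local_out
 * to BR_POST_ROUTING, and the HQoS pong handler is replaced by
 * mtk_hnat_br_nf_forward on BR_FORWARD.
 */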
2415int whnat_adjust_nf_hooks(void)
2416{
2417 struct nf_hook_ops *hook = mtk_hnat_nf_ops;
2418 unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);
2419
developerfd40db22021-04-29 10:08:25 +08002420 while (n-- > 0) {
2421 if (hook[n].hook == mtk_hnat_br_nf_local_in) {
2422 hook[n].hooknum = NF_BR_PRE_ROUTING;
developer2b85f652021-08-19 16:09:50 +08002423 hook[n].priority = NF_BR_PRI_FIRST + 1;
developerfd40db22021-04-29 10:08:25 +08002424 } else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
2425 hook[n].hooknum = NF_BR_POST_ROUTING;
2426 } else if (hook[n].hook == mtk_pong_hqos_handler) {
2427 hook[n].hook = mtk_hnat_br_nf_forward;
2428 hook[n].hooknum = NF_BR_FORWARD;
2429 hook[n].priority = NF_BR_PRI_LAST - 1;
2430 }
2431 }
2432
2433 return 0;
2434}
2435
developerfd40db22021-04-29 10:08:25 +08002436int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
2437 struct packet_type *pt, struct net_device *unused)
2438{
2439 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2440
2441 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2442 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2443
2444 do_hnat_ge_to_ext(skb, __func__);
2445
2446 return 0;
2447}
developerfd40db22021-04-29 10:08:25 +08002448