blob: d2e09e2bc65cbaabd3eb490108cee93cfd13f2ba [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
11 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
12 */
13
14#include <linux/netfilter_bridge.h>
15#include <linux/netfilter_ipv6.h>
16
17#include <net/arp.h>
18#include <net/neighbour.h>
19#include <net/netfilter/nf_conntrack_helper.h>
20#include <net/netfilter/nf_flow_table.h>
21#include <net/ipv6.h>
22#include <net/ip6_route.h>
23#include <net/ip.h>
24#include <net/tcp.h>
25#include <net/udp.h>
developer30a47682021-11-02 17:06:14 +080026#include <net/netfilter/nf_conntrack.h>
27#include <net/netfilter/nf_conntrack_acct.h>
developerfd40db22021-04-29 10:08:25 +080028
29#include "nf_hnat_mtk.h"
30#include "hnat.h"
31
32#include "../mtk_eth_soc.h"
developer8051e042022-04-08 13:26:36 +080033#include "../mtk_eth_reset.h"
developerfd40db22021-04-29 10:08:25 +080034
35#define do_ge2ext_fast(dev, skb) \
developerd35bbcc2022-09-28 22:46:01 +080036 ((IS_LAN_GRP(dev) || IS_WAN(dev) || IS_PPD(dev)) && \
developerfd40db22021-04-29 10:08:25 +080037 skb_hnat_is_hashed(skb) && \
38 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
39#define do_ext2ge_fast_learn(dev, skb) \
40 (IS_PPD(dev) && \
41 (skb_hnat_sport(skb) == NR_PDMA_PORT || \
42 skb_hnat_sport(skb) == NR_QDMA_PORT) && \
43 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) || \
44 get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
45#define do_mape_w2l_fast(dev, skb) \
46 (mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))
47
48static struct ipv6hdr mape_l2w_v6h;
49static struct ipv6hdr mape_w2l_v6h;
50static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
51{
52 int i;
53
54 for (i = 1; i < MAX_IF_NUM; i++) {
55 if (hnat_priv->wifi_hook_if[i] == dev)
56 return i;
57 }
58
59 return 0;
60}
61
62static inline int get_ext_device_number(void)
63{
64 int i, number = 0;
65
66 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++)
67 number += 1;
68 return number;
69}
70
71static inline int find_extif_from_devname(const char *name)
72{
73 int i;
74 struct extdev_entry *ext_entry;
75
76 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
77 ext_entry = hnat_priv->ext_if[i];
78 if (!strcmp(name, ext_entry->name))
79 return 1;
80 }
81 return 0;
82}
83
84static inline int get_index_from_dev(const struct net_device *dev)
85{
86 int i;
87 struct extdev_entry *ext_entry;
88
89 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
90 ext_entry = hnat_priv->ext_if[i];
91 if (dev == ext_entry->dev)
92 return ext_entry->dev->ifindex;
93 }
94 return 0;
95}
96
97static inline struct net_device *get_dev_from_index(int index)
98{
99 int i;
100 struct extdev_entry *ext_entry;
101 struct net_device *dev = 0;
102
103 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
104 ext_entry = hnat_priv->ext_if[i];
105 if (ext_entry->dev && index == ext_entry->dev->ifindex) {
106 dev = ext_entry->dev;
107 break;
108 }
109 }
110 return dev;
111}
112
113static inline struct net_device *get_wandev_from_index(int index)
114{
developer8c9c0d02021-06-18 16:15:37 +0800115 if (!hnat_priv->g_wandev)
116 hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);
developerfd40db22021-04-29 10:08:25 +0800117
developer8c9c0d02021-06-18 16:15:37 +0800118 if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index)
119 return hnat_priv->g_wandev;
developerfd40db22021-04-29 10:08:25 +0800120 return NULL;
121}
122
123static inline int extif_set_dev(struct net_device *dev)
124{
125 int i;
126 struct extdev_entry *ext_entry;
127
128 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
129 ext_entry = hnat_priv->ext_if[i];
130 if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
131 dev_hold(dev);
132 ext_entry->dev = dev;
133 pr_info("%s(%s)\n", __func__, dev->name);
134
135 return ext_entry->dev->ifindex;
136 }
137 }
138
139 return -1;
140}
141
142static inline int extif_put_dev(struct net_device *dev)
143{
144 int i;
145 struct extdev_entry *ext_entry;
146
147 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
148 ext_entry = hnat_priv->ext_if[i];
149 if (ext_entry->dev == dev) {
150 ext_entry->dev = NULL;
151 dev_put(dev);
152 pr_info("%s(%s)\n", __func__, dev->name);
153
developerbc53e5f2021-05-21 10:07:17 +0800154 return 0;
developerfd40db22021-04-29 10:08:25 +0800155 }
156 }
157
158 return -1;
159}
160
161int ext_if_add(struct extdev_entry *ext_entry)
162{
163 int len = get_ext_device_number();
164
developer4c32b7a2021-11-13 16:46:43 +0800165 if (len < MAX_EXT_DEVS)
166 hnat_priv->ext_if[len++] = ext_entry;
167
developerfd40db22021-04-29 10:08:25 +0800168 return len;
169}
170
171int ext_if_del(struct extdev_entry *ext_entry)
172{
173 int i, j;
174
175 for (i = 0; i < MAX_EXT_DEVS; i++) {
176 if (hnat_priv->ext_if[i] == ext_entry) {
177 for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++)
178 hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
179 hnat_priv->ext_if[j] = NULL;
180 break;
181 }
182 }
183
184 return i;
185}
186
187void foe_clear_all_bind_entries(struct net_device *dev)
188{
developer471f6562021-05-10 20:48:34 +0800189 int i, hash_index;
developerfd40db22021-04-29 10:08:25 +0800190 struct foe_entry *entry;
191
developerd35bbcc2022-09-28 22:46:01 +0800192 if (!IS_LAN_GRP(dev) && !IS_WAN(dev) &&
developerfd40db22021-04-29 10:08:25 +0800193 !find_extif_from_devname(dev->name) &&
194 !dev->netdev_ops->ndo_flow_offload_check)
195 return;
196
developer471f6562021-05-10 20:48:34 +0800197 for (i = 0; i < CFG_PPE_NUM; i++) {
198 cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
199 SMA, SMA_ONLY_FWD_CPU);
200
201 for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
202 entry = hnat_priv->foe_table_cpu[i] + hash_index;
203 if (entry->bfib1.state == BIND) {
204 entry->ipv4_hnapt.udib1.state = INVALID;
205 entry->ipv4_hnapt.udib1.time_stamp =
206 readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
207 }
developerfd40db22021-04-29 10:08:25 +0800208 }
209 }
210
211 /* clear HWNAT cache */
212 hnat_cache_ebl(1);
213
214 mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
215}
216
217static void gmac_ppe_fwd_enable(struct net_device *dev)
218{
219 if (IS_LAN(dev) || IS_GMAC1_MODE)
developerd35bbcc2022-09-28 22:46:01 +0800220 set_gmac_ppe_fwd(NR_GMAC1_PORT, 1);
developerfd40db22021-04-29 10:08:25 +0800221 else if (IS_WAN(dev))
developerd35bbcc2022-09-28 22:46:01 +0800222 set_gmac_ppe_fwd(NR_GMAC2_PORT, 1);
223 else if (IS_LAN2(dev))
224 set_gmac_ppe_fwd(NR_GMAC3_PORT, 1);
developerfd40db22021-04-29 10:08:25 +0800225}
226
227int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
228 void *ptr)
229{
230 struct net_device *dev;
231
232 dev = netdev_notifier_info_to_dev(ptr);
233
234 switch (event) {
235 case NETDEV_UP:
236 gmac_ppe_fwd_enable(dev);
237
238 extif_set_dev(dev);
239
240 break;
241 case NETDEV_GOING_DOWN:
242 if (!get_wifi_hook_if_index_from_dev(dev))
243 extif_put_dev(dev);
244
245 foe_clear_all_bind_entries(dev);
246
247 break;
developer8c9c0d02021-06-18 16:15:37 +0800248 case NETDEV_UNREGISTER:
developer1901f412022-01-04 17:22:00 +0800249 if (hnat_priv->g_ppdev == dev) {
developer8c9c0d02021-06-18 16:15:37 +0800250 hnat_priv->g_ppdev = NULL;
251 dev_put(dev);
252 }
developer1901f412022-01-04 17:22:00 +0800253 if (hnat_priv->g_wandev == dev) {
developer8c9c0d02021-06-18 16:15:37 +0800254 hnat_priv->g_wandev = NULL;
255 dev_put(dev);
256 }
257
258 break;
259 case NETDEV_REGISTER:
260 if (IS_PPD(dev) && !hnat_priv->g_ppdev)
261 hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
262 if (IS_WAN(dev) && !hnat_priv->g_wandev)
263 hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);
264
265 break;
developer8051e042022-04-08 13:26:36 +0800266 case MTK_FE_RESET_NAT_DONE:
267 pr_info("[%s] HNAT driver starts to do warm init !\n", __func__);
268 hnat_warm_init();
269 break;
developerfd40db22021-04-29 10:08:25 +0800270 default:
271 break;
272 }
273
274 return NOTIFY_DONE;
275}
276
277void foe_clear_entry(struct neighbour *neigh)
278{
279 u32 *daddr = (u32 *)neigh->primary_key;
280 unsigned char h_dest[ETH_ALEN];
281 struct foe_entry *entry;
developer471f6562021-05-10 20:48:34 +0800282 int i, hash_index;
developerfd40db22021-04-29 10:08:25 +0800283 u32 dip;
284
285 dip = (u32)(*daddr);
286
developer471f6562021-05-10 20:48:34 +0800287 for (i = 0; i < CFG_PPE_NUM; i++) {
developer8051e042022-04-08 13:26:36 +0800288 if (!hnat_priv->foe_table_cpu[i])
289 continue;
290
developer471f6562021-05-10 20:48:34 +0800291 for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
292 entry = hnat_priv->foe_table_cpu[i] + hash_index;
293 if (entry->bfib1.state == BIND &&
294 entry->ipv4_hnapt.new_dip == ntohl(dip)) {
295 *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
296 *((u16 *)&h_dest[4]) =
297 swab16(entry->ipv4_hnapt.dmac_lo);
298 if (strncmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
299 pr_info("%s: state=%d\n", __func__,
300 neigh->nud_state);
301 cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
302 SMA, SMA_ONLY_FWD_CPU);
developerfd40db22021-04-29 10:08:25 +0800303
developer471f6562021-05-10 20:48:34 +0800304 entry->ipv4_hnapt.udib1.state = INVALID;
305 entry->ipv4_hnapt.udib1.time_stamp =
306 readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
developerfd40db22021-04-29 10:08:25 +0800307
developer471f6562021-05-10 20:48:34 +0800308 /* clear HWNAT cache */
309 hnat_cache_ebl(1);
developerfd40db22021-04-29 10:08:25 +0800310
developer471f6562021-05-10 20:48:34 +0800311 mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
312 jiffies + 3 * HZ);
developerfd40db22021-04-29 10:08:25 +0800313
developer471f6562021-05-10 20:48:34 +0800314 pr_info("Delete old entry: dip =%pI4\n", &dip);
315 pr_info("Old mac= %pM\n", h_dest);
316 pr_info("New mac= %pM\n", neigh->ha);
317 }
developerfd40db22021-04-29 10:08:25 +0800318 }
319 }
320 }
321}
322
323int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
324 void *ptr)
325{
326 struct net_device *dev = NULL;
327 struct neighbour *neigh = NULL;
328
329 switch (event) {
330 case NETEVENT_NEIGH_UPDATE:
331 neigh = ptr;
332 dev = neigh->dev;
333 if (dev)
334 foe_clear_entry(neigh);
335 break;
336 }
337
338 return NOTIFY_DONE;
339}
340
341unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
342{
343 struct ethhdr *eth = NULL;
344 struct ipv6hdr *ip6h = NULL;
345 struct iphdr *iph = NULL;
346
347 if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
348 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
349 return -1;
350 }
351
352 /* point to L3 */
353 memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
354 memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);
355
356 eth = (struct ethhdr *)(skb->data - ETH_HLEN);
357 eth->h_proto = htons(ETH_P_IPV6);
358 skb->protocol = htons(ETH_P_IPV6);
359
360 iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
361 ip6h = (struct ipv6hdr *)(skb->data);
362 ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */
363
364 skb_set_network_header(skb, 0);
365 skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
366 return 0;
367}
368
369static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
370 struct ethhdr *eth)
371{
372 skb->pkt_type = PACKET_HOST;
373 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
374 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
375 skb->pkt_type = PACKET_BROADCAST;
376 else
377 skb->pkt_type = PACKET_MULTICAST;
378 }
379}
380
381unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
382 const char *func)
383{
384 if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
385 u16 vlan_id = 0;
386 skb_set_network_header(skb, 0);
387 skb_push(skb, ETH_HLEN);
388 set_to_ppe(skb);
389
390 vlan_id = skb_vlan_tag_get_id(skb);
391 if (vlan_id) {
392 skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
393 if (!skb)
394 return -1;
395 }
396
397 /*set where we come from*/
398 skb->vlan_proto = htons(ETH_P_8021Q);
399 skb->vlan_tci =
400 (VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
401 trace_printk(
402 "%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
403 __func__, ntohs(skb->vlan_proto), skb->vlan_tci,
404 in->name, hnat_priv->g_ppdev->name);
405 skb->dev = hnat_priv->g_ppdev;
406 dev_queue_xmit(skb);
407 trace_printk("%s: called from %s successfully\n", __func__, func);
408 return 0;
409 }
410
411 trace_printk("%s: called from %s fail\n", __func__, func);
412 return -1;
413}
414
415unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
416{
417 struct ethhdr *eth = eth_hdr(skb);
418 struct net_device *dev;
419 struct foe_entry *entry;
420
421 trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
422 ntohs(skb->vlan_proto), skb->vlan_tci);
423
developer577ad2f2022-11-28 10:33:36 +0800424 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
425 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
426 return -1;
427
developerfd40db22021-04-29 10:08:25 +0800428 dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);
429
430 if (dev) {
431 /*set where we to go*/
432 skb->dev = dev;
433 skb->vlan_proto = 0;
434 skb->vlan_tci = 0;
435
436 if (ntohs(eth->h_proto) == ETH_P_8021Q) {
437 skb = skb_vlan_untag(skb);
438 if (unlikely(!skb))
439 return -1;
440 }
441
442 if (IS_BOND_MODE &&
developer4164cfe2022-12-01 11:27:41 +0800443 (((hnat_priv->data->version == MTK_HNAT_V2 ||
444 hnat_priv->data->version == MTK_HNAT_V3) &&
developerfd40db22021-04-29 10:08:25 +0800445 (skb_hnat_entry(skb) != 0x7fff)) ||
developer4164cfe2022-12-01 11:27:41 +0800446 ((hnat_priv->data->version != MTK_HNAT_V2 &&
447 hnat_priv->data->version != MTK_HNAT_V3) &&
developerfd40db22021-04-29 10:08:25 +0800448 (skb_hnat_entry(skb) != 0x3fff))))
449 skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);
450
451 set_from_extge(skb);
452 fix_skb_packet_type(skb, skb->dev, eth);
453 netif_rx(skb);
454 trace_printk("%s: called from %s successfully\n", __func__,
455 func);
456 return 0;
457 } else {
458 /* MapE WAN --> LAN/WLAN PingPong. */
459 dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
460 if (mape_toggle && dev) {
461 if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
462 skb_set_mac_header(skb, -ETH_HLEN);
463 skb->dev = dev;
464 set_from_mape(skb);
465 skb->vlan_proto = 0;
466 skb->vlan_tci = 0;
467 fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
developer471f6562021-05-10 20:48:34 +0800468 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +0800469 entry->bfib1.pkt_type = IPV4_HNAPT;
470 netif_rx(skb);
471 return 0;
472 }
473 }
474 trace_printk("%s: called from %s fail\n", __func__, func);
475 return -1;
476 }
477}
478
479unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
480{
481 /*set where we to go*/
482 u8 index;
483 struct foe_entry *entry;
484 struct net_device *dev;
485
developer577ad2f2022-11-28 10:33:36 +0800486 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
487 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
488 return -1;
489
developer471f6562021-05-10 20:48:34 +0800490 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +0800491
492 if (IS_IPV4_GRP(entry))
493 index = entry->ipv4_hnapt.act_dp;
494 else
495 index = entry->ipv6_5t_route.act_dp;
496
497 skb->dev = get_dev_from_index(index);
498
developer34028fb2022-01-11 13:51:29 +0800499 if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
developerfd40db22021-04-29 10:08:25 +0800500 skb = skb_unshare(skb, GFP_ATOMIC);
501 if (!skb)
502 return NF_ACCEPT;
503
504 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
505 return NF_ACCEPT;
506
507 skb_pull_rcsum(skb, VLAN_HLEN);
508
509 memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
510 2 * ETH_ALEN);
511 }
developerfd40db22021-04-29 10:08:25 +0800512
513 if (skb->dev) {
514 skb_set_network_header(skb, 0);
515 skb_push(skb, ETH_HLEN);
516 dev_queue_xmit(skb);
517 trace_printk("%s: called from %s successfully\n", __func__,
518 func);
519 return 0;
520 } else {
521 if (mape_toggle) {
522 /* Add ipv6 header mape for lan/wlan -->wan */
523 dev = get_wandev_from_index(index);
524 if (dev) {
525 if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
526 skb_set_network_header(skb, 0);
527 skb_push(skb, ETH_HLEN);
528 skb_set_mac_header(skb, 0);
529 skb->dev = dev;
530 dev_queue_xmit(skb);
531 return 0;
532 }
533 trace_printk("%s: called from %s fail[MapE]\n", __func__,
534 func);
535 return -1;
536 }
537 }
538 }
539 /*if external devices is down, invalidate related ppe entry*/
540 if (entry_hnat_is_bound(entry)) {
541 entry->bfib1.state = INVALID;
542 if (IS_IPV4_GRP(entry))
543 entry->ipv4_hnapt.act_dp = 0;
544 else
545 entry->ipv6_5t_route.act_dp = 0;
546
547 /* clear HWNAT cache */
548 hnat_cache_ebl(1);
549 }
550 trace_printk("%s: called from %s fail, index=%x\n", __func__,
551 func, index);
552 return -1;
553}
554
555static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
556 const struct net_device *out, const char *func)
557{
558 trace_printk(
559 "[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
560 __func__, in->name, skb_hnat_iface(skb),
561 HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
562 skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
563 func);
564}
565
566static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
567 const struct net_device *out, const char *func)
568{
569 trace_printk(
570 "[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
571 __func__, in->name, skb_hnat_iface(skb),
572 HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
573 skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
574 func);
575}
576
577static inline void hnat_set_iif(const struct nf_hook_state *state,
578 struct sk_buff *skb, int val)
579{
developer40017972021-06-29 14:27:35 +0800580 if (IS_WHNAT(state->in) && FROM_WED(skb)) {
developere567ad32021-05-25 17:16:17 +0800581 return;
582 } else if (IS_LAN(state->in)) {
developerfd40db22021-04-29 10:08:25 +0800583 skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
developerd35bbcc2022-09-28 22:46:01 +0800584 } else if (IS_LAN2(state->in)) {
585 skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN2;
developerfd40db22021-04-29 10:08:25 +0800586 } else if (IS_PPD(state->in)) {
587 skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
588 } else if (IS_EXT(state->in)) {
589 skb_hnat_iface(skb) = FOE_MAGIC_EXT;
590 } else if (IS_WAN(state->in)) {
591 skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
developerfd40db22021-04-29 10:08:25 +0800592 } else if (!IS_BR(state->in)) {
developer99506e52021-06-30 22:03:02 +0800593 if (state->in->netdev_ops->ndo_flow_offload_check) {
594 skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
595 } else {
596 skb_hnat_iface(skb) = FOE_INVALID;
developerfd40db22021-04-29 10:08:25 +0800597
developer99506e52021-06-30 22:03:02 +0800598 if (is_magic_tag_valid(skb) &&
599 IS_SPACE_AVAILABLE_HEAD(skb))
600 memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
601 }
developerfd40db22021-04-29 10:08:25 +0800602 }
603}
604
605static inline void hnat_set_alg(const struct nf_hook_state *state,
606 struct sk_buff *skb, int val)
607{
608 skb_hnat_alg(skb) = val;
609}
610
611static inline void hnat_set_head_frags(const struct nf_hook_state *state,
612 struct sk_buff *head_skb, int val,
613 void (*fn)(const struct nf_hook_state *state,
614 struct sk_buff *skb, int val))
615{
616 struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;
617
618 fn(state, head_skb, val);
619 while (segs) {
620 fn(state, segs, val);
621 segs = segs->next;
622 }
623}
624
developer25fc8c02022-05-06 16:24:02 +0800625static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
626{
627 entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
628 entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
629 entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
630}
631
developerfd40db22021-04-29 10:08:25 +0800632unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
633 const char *func)
634{
635 struct ipv6hdr *ip6h = ipv6_hdr(skb);
636 struct iphdr _iphdr;
637 struct iphdr *iph;
638 struct ethhdr *eth;
639
640 /* WAN -> LAN/WLAN MapE. */
641 if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
642 iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
developer4c32b7a2021-11-13 16:46:43 +0800643 if (unlikely(!iph))
644 return -1;
645
developerfd40db22021-04-29 10:08:25 +0800646 switch (iph->protocol) {
647 case IPPROTO_UDP:
648 case IPPROTO_TCP:
649 break;
650 default:
651 return -1;
652 }
653 mape_w2l_v6h = *ip6h;
654
655 /* Remove ipv6 header. */
656 memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
657 skb->data - ETH_HLEN, ETH_HLEN);
658 skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
659 skb_set_mac_header(skb, 0);
660 skb_set_network_header(skb, ETH_HLEN);
661 skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));
662
663 eth = eth_hdr(skb);
664 eth->h_proto = htons(ETH_P_IP);
665 set_to_ppe(skb);
666
667 skb->vlan_proto = htons(ETH_P_8021Q);
668 skb->vlan_tci =
669 (VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
670
671 if (!hnat_priv->g_ppdev)
672 hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
673
674 skb->dev = hnat_priv->g_ppdev;
675 skb->protocol = htons(ETH_P_IP);
676
677 dev_queue_xmit(skb);
678
679 return 0;
680 }
681 return -1;
682}
683
developer25fc8c02022-05-06 16:24:02 +0800684
developer25fc8c02022-05-06 16:24:02 +0800685
developerfd40db22021-04-29 10:08:25 +0800686static unsigned int is_ppe_support_type(struct sk_buff *skb)
687{
688 struct ethhdr *eth = NULL;
689 struct iphdr *iph = NULL;
690 struct ipv6hdr *ip6h = NULL;
691 struct iphdr _iphdr;
692
693 eth = eth_hdr(skb);
developerfd2d7422021-06-09 17:09:39 +0800694 if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) ||
developerb254f762022-01-20 20:06:25 +0800695 is_broadcast_ether_addr(eth->h_dest))
developerfd40db22021-04-29 10:08:25 +0800696 return 0;
697
698 switch (ntohs(skb->protocol)) {
699 case ETH_P_IP:
700 iph = ip_hdr(skb);
701
702 /* do not accelerate non tcp/udp traffic */
703 if ((iph->protocol == IPPROTO_TCP) ||
704 (iph->protocol == IPPROTO_UDP) ||
705 (iph->protocol == IPPROTO_IPV6)) {
706 return 1;
707 }
708
709 break;
710 case ETH_P_IPV6:
711 ip6h = ipv6_hdr(skb);
712
713 if ((ip6h->nexthdr == NEXTHDR_TCP) ||
714 (ip6h->nexthdr == NEXTHDR_UDP)) {
715 return 1;
716 } else if (ip6h->nexthdr == NEXTHDR_IPIP) {
717 iph = skb_header_pointer(skb, IPV6_HDR_LEN,
718 sizeof(_iphdr), &_iphdr);
developer4c32b7a2021-11-13 16:46:43 +0800719 if (unlikely(!iph))
720 return 0;
developerfd40db22021-04-29 10:08:25 +0800721
722 if ((iph->protocol == IPPROTO_TCP) ||
723 (iph->protocol == IPPROTO_UDP)) {
724 return 1;
725 }
726
727 }
728
729 break;
730 case ETH_P_8021Q:
731 return 1;
732 }
733
734 return 0;
735}
736
737static unsigned int
738mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
739 const struct nf_hook_state *state)
740{
developer577ad2f2022-11-28 10:33:36 +0800741 if (!skb)
742 goto drop;
743
developerfd40db22021-04-29 10:08:25 +0800744 if (!is_ppe_support_type(skb)) {
745 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
746 return NF_ACCEPT;
747 }
748
749 hnat_set_head_frags(state, skb, -1, hnat_set_iif);
750
751 pre_routing_print(skb, state->in, state->out, __func__);
752
developerfd40db22021-04-29 10:08:25 +0800753 /* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
754 if (do_ext2ge_fast_try(state->in, skb)) {
755 if (!do_hnat_ext_to_ge(skb, state->in, __func__))
756 return NF_STOLEN;
developerfd40db22021-04-29 10:08:25 +0800757 return NF_ACCEPT;
758 }
759
760 /* packets form ge -> external device
761 * For standalone wan interface
762 */
763 if (do_ge2ext_fast(state->in, skb)) {
764 if (!do_hnat_ge_to_ext(skb, __func__))
765 return NF_STOLEN;
766 goto drop;
767 }
768
developerf4c370a2022-10-08 17:01:19 +0800769
770#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
developerfd40db22021-04-29 10:08:25 +0800771 /* MapE need remove ipv6 header and pingpong. */
772 if (do_mape_w2l_fast(state->in, skb)) {
773 if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
774 return NF_STOLEN;
775 else
776 return NF_ACCEPT;
777 }
778
779 if (is_from_mape(skb))
780 clr_from_extge(skb);
developerf4c370a2022-10-08 17:01:19 +0800781#endif
developerfd40db22021-04-29 10:08:25 +0800782 return NF_ACCEPT;
783drop:
developer577ad2f2022-11-28 10:33:36 +0800784 if (skb)
785 printk_ratelimited(KERN_WARNING
786 "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
787 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
788 __func__, state->in->name, skb_hnat_iface(skb),
789 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
790 skb_hnat_sport(skb), skb_hnat_reason(skb),
791 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +0800792
793 return NF_DROP;
794}
795
796static unsigned int
797mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
798 const struct nf_hook_state *state)
799{
developer577ad2f2022-11-28 10:33:36 +0800800 if (!skb)
801 goto drop;
802
developerfd40db22021-04-29 10:08:25 +0800803 if (!is_ppe_support_type(skb)) {
804 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
805 return NF_ACCEPT;
806 }
807
808 hnat_set_head_frags(state, skb, -1, hnat_set_iif);
809
810 pre_routing_print(skb, state->in, state->out, __func__);
811
developerfd40db22021-04-29 10:08:25 +0800812 /* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
813 if (do_ext2ge_fast_try(state->in, skb)) {
814 if (!do_hnat_ext_to_ge(skb, state->in, __func__))
815 return NF_STOLEN;
developerfd40db22021-04-29 10:08:25 +0800816 return NF_ACCEPT;
817 }
818
819 /* packets form ge -> external device
820 * For standalone wan interface
821 */
822 if (do_ge2ext_fast(state->in, skb)) {
823 if (!do_hnat_ge_to_ext(skb, __func__))
824 return NF_STOLEN;
825 goto drop;
826 }
827
828 return NF_ACCEPT;
829drop:
developer577ad2f2022-11-28 10:33:36 +0800830 if (skb)
831 printk_ratelimited(KERN_WARNING
832 "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
833 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
834 __func__, state->in->name, skb_hnat_iface(skb),
835 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
836 skb_hnat_sport(skb), skb_hnat_reason(skb),
837 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +0800838
839 return NF_DROP;
840}
841
842static unsigned int
843mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
844 const struct nf_hook_state *state)
845{
developerfd40db22021-04-29 10:08:25 +0800846 struct vlan_ethhdr *veth;
847
developer577ad2f2022-11-28 10:33:36 +0800848 if (!skb)
849 goto drop;
850
developer34028fb2022-01-11 13:51:29 +0800851 if (IS_HQOS_MODE && hnat_priv->data->whnat) {
developerfd40db22021-04-29 10:08:25 +0800852 veth = (struct vlan_ethhdr *)skb_mac_header(skb);
853
854 if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
855 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
856 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
857 }
858 }
developerfd40db22021-04-29 10:08:25 +0800859
860 if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
861 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
862 return NF_ACCEPT;
863 }
864
865 hnat_set_head_frags(state, skb, -1, hnat_set_iif);
866
867 pre_routing_print(skb, state->in, state->out, __func__);
868
869 if (unlikely(debug_level >= 7)) {
870 hnat_cpu_reason_cnt(skb);
871 if (skb_hnat_reason(skb) == dbg_cpu_reason)
872 foe_dump_pkt(skb);
873 }
874
developerfd40db22021-04-29 10:08:25 +0800875 /* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
876 if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
877 !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
878 if (!hnat_priv->g_ppdev)
879 hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
880
881 if (!do_hnat_ext_to_ge(skb, state->in, __func__))
882 return NF_STOLEN;
developerfd40db22021-04-29 10:08:25 +0800883 return NF_ACCEPT;
884 }
885
886 if (hnat_priv->data->whnat) {
887 if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
888 clr_from_extge(skb);
889
890 /* packets from external devices -> xxx ,step 2, learning stage */
developeraf07fad2021-11-19 17:53:42 +0800891 if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
892 (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
developerfd40db22021-04-29 10:08:25 +0800893 if (!do_hnat_ext_to_ge2(skb, __func__))
894 return NF_STOLEN;
895 goto drop;
896 }
897
898 /* packets form ge -> external device */
899 if (do_ge2ext_fast(state->in, skb)) {
900 if (!do_hnat_ge_to_ext(skb, __func__))
901 return NF_STOLEN;
902 goto drop;
903 }
904 }
905
developerf4c370a2022-10-08 17:01:19 +0800906#if !(defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3))
developerfd40db22021-04-29 10:08:25 +0800907 /* MapE need remove ipv6 header and pingpong. (bridge mode) */
908 if (do_mape_w2l_fast(state->in, skb)) {
909 if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
910 return NF_STOLEN;
911 else
912 return NF_ACCEPT;
913 }
developerf4c370a2022-10-08 17:01:19 +0800914#endif
developerfd40db22021-04-29 10:08:25 +0800915 return NF_ACCEPT;
916drop:
developer577ad2f2022-11-28 10:33:36 +0800917 if (skb)
918 printk_ratelimited(KERN_WARNING
919 "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
920 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
921 __func__, state->in->name, skb_hnat_iface(skb),
922 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
923 skb_hnat_sport(skb), skb_hnat_reason(skb),
924 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +0800925
926 return NF_DROP;
927}
928
929static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
930 const struct net_device *out,
931 struct flow_offload_hw_path *hw_path)
932{
933 const struct in6_addr *ipv6_nexthop;
934 struct neighbour *neigh = NULL;
935 struct dst_entry *dst = skb_dst(skb);
936 struct ethhdr *eth;
937
938 if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
939 memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
940 memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
941 return 0;
942 }
943
944 rcu_read_lock_bh();
945 ipv6_nexthop =
946 rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
947 neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
948 if (unlikely(!neigh)) {
949 dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
950 &ipv6_hdr(skb)->daddr);
951 rcu_read_unlock_bh();
952 return -1;
953 }
954
955 /* why do we get all zero ethernet address ? */
956 if (!is_valid_ether_addr(neigh->ha)) {
957 rcu_read_unlock_bh();
958 return -1;
959 }
960
961 if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
962 /*copy ether type for DS-Lite and MapE */
963 eth = (struct ethhdr *)(skb->data - ETH_HLEN);
964 eth->h_proto = skb->protocol;
965 } else {
966 eth = eth_hdr(skb);
967 }
968
969 ether_addr_copy(eth->h_dest, neigh->ha);
970 ether_addr_copy(eth->h_source, out->dev_addr);
971
972 rcu_read_unlock_bh();
973
974 return 0;
975}
976
977static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
978 const struct net_device *out,
979 struct flow_offload_hw_path *hw_path)
980{
981 u32 nexthop;
982 struct neighbour *neigh;
983 struct dst_entry *dst = skb_dst(skb);
984 struct rtable *rt = (struct rtable *)dst;
985 struct net_device *dev = (__force struct net_device *)out;
986
987 if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
988 memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
989 memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
990 return 0;
991 }
992
993 rcu_read_lock_bh();
994 nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
995 neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
996 if (unlikely(!neigh)) {
997 dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
998 &ip_hdr(skb)->daddr);
999 rcu_read_unlock_bh();
1000 return -1;
1001 }
1002
1003 /* why do we get all zero ethernet address ? */
1004 if (!is_valid_ether_addr(neigh->ha)) {
1005 rcu_read_unlock_bh();
1006 return -1;
1007 }
1008
1009 memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
1010 memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
1011
1012 rcu_read_unlock_bh();
1013
1014 return 0;
1015}
1016
1017static u16 ppe_get_chkbase(struct iphdr *iph)
1018{
1019 u16 org_chksum = ntohs(iph->check);
1020 u16 org_tot_len = ntohs(iph->tot_len);
1021 u16 org_id = ntohs(iph->id);
1022 u16 chksum_tmp, tot_len_tmp, id_tmp;
1023 u32 tmp = 0;
1024 u16 chksum_base = 0;
1025
1026 chksum_tmp = ~(org_chksum);
1027 tot_len_tmp = ~(org_tot_len);
1028 id_tmp = ~(org_id);
1029 tmp = chksum_tmp + tot_len_tmp + id_tmp;
1030 tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
1031 tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
1032 chksum_base = tmp & 0xFFFF;
1033
1034 return chksum_base;
1035}
1036
1037struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
1038 struct flow_offload_hw_path *hw_path)
1039{
developer5ffc5f12022-10-25 18:51:46 +08001040 switch ((int)entry.bfib1.pkt_type) {
developerfd40db22021-04-29 10:08:25 +08001041 case IPV4_HNAPT:
1042 case IPV4_HNAT:
1043 entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
1044 entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
1045 entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
1046 entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1047 entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
1048 break;
1049 case IPV4_DSLITE:
1050 case IPV4_MAP_E:
1051 case IPV6_6RD:
1052 case IPV6_5T_ROUTE:
1053 case IPV6_3T_ROUTE:
developer5ffc5f12022-10-25 18:51:46 +08001054 case IPV6_HNAPT:
1055 case IPV6_HNAT:
developerfd40db22021-04-29 10:08:25 +08001056 entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
1057 entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
1058 entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
1059 entry.ipv6_5t_route.smac_lo =
1060 swab16(*((u16 *)&eth->h_source[4]));
1061 entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
1062 break;
1063 }
1064 return entry;
1065}
1066
1067struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
1068 struct flow_offload_hw_path *hw_path)
1069{
1070 entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
1071 entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
1072 entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
developerfd40db22021-04-29 10:08:25 +08001073 entry.bfib1.cah = 1;
developer4164cfe2022-12-01 11:27:41 +08001074 entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V2 ||
1075 hnat_priv->data->version == MTK_HNAT_V3) ?
developerfd40db22021-04-29 10:08:25 +08001076 readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
1077 readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);
1078
developer5ffc5f12022-10-25 18:51:46 +08001079 switch ((int)entry.bfib1.pkt_type) {
developerfd40db22021-04-29 10:08:25 +08001080 case IPV4_HNAPT:
1081 case IPV4_HNAT:
developer8116b0a2021-08-23 18:07:20 +08001082 if (hnat_priv->data->mcast &&
1083 is_multicast_ether_addr(&eth->h_dest[0])) {
developerfd40db22021-04-29 10:08:25 +08001084 entry.ipv4_hnapt.iblk2.mcast = 1;
developer4164cfe2022-12-01 11:27:41 +08001085 if (hnat_priv->data->version == MTK_HNAT_V1_3) {
developerfd40db22021-04-29 10:08:25 +08001086 entry.bfib1.sta = 1;
1087 entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
1088 }
1089 } else {
1090 entry.ipv4_hnapt.iblk2.mcast = 0;
1091 }
1092
1093 entry.ipv4_hnapt.iblk2.port_ag =
developer4164cfe2022-12-01 11:27:41 +08001094 (hnat_priv->data->version == MTK_HNAT_V2 ||
1095 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
developerfd40db22021-04-29 10:08:25 +08001096 break;
1097 case IPV4_DSLITE:
1098 case IPV4_MAP_E:
1099 case IPV6_6RD:
1100 case IPV6_5T_ROUTE:
1101 case IPV6_3T_ROUTE:
developer5ffc5f12022-10-25 18:51:46 +08001102 case IPV6_HNAPT:
1103 case IPV6_HNAT:
developer8116b0a2021-08-23 18:07:20 +08001104 if (hnat_priv->data->mcast &&
1105 is_multicast_ether_addr(&eth->h_dest[0])) {
developerfd40db22021-04-29 10:08:25 +08001106 entry.ipv6_5t_route.iblk2.mcast = 1;
developer4164cfe2022-12-01 11:27:41 +08001107 if (hnat_priv->data->version == MTK_HNAT_V1_3) {
developerfd40db22021-04-29 10:08:25 +08001108 entry.bfib1.sta = 1;
1109 entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
1110 }
1111 } else {
1112 entry.ipv6_5t_route.iblk2.mcast = 0;
1113 }
1114
1115 entry.ipv6_5t_route.iblk2.port_ag =
developer4164cfe2022-12-01 11:27:41 +08001116 (hnat_priv->data->version == MTK_HNAT_V2 ||
1117 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
developerfd40db22021-04-29 10:08:25 +08001118 break;
1119 }
1120 return entry;
1121}
1122
developerfd40db22021-04-29 10:08:25 +08001123static unsigned int skb_to_hnat_info(struct sk_buff *skb,
1124 const struct net_device *dev,
1125 struct foe_entry *foe,
1126 struct flow_offload_hw_path *hw_path)
1127{
1128 struct foe_entry entry = { 0 };
1129 int whnat = IS_WHNAT(dev);
1130 struct ethhdr *eth;
1131 struct iphdr *iph;
1132 struct ipv6hdr *ip6h;
1133 struct tcpudphdr _ports;
1134 const struct tcpudphdr *pptr;
developer5ffc5f12022-10-25 18:51:46 +08001135 struct nf_conn *ct;
1136 enum ip_conntrack_info ctinfo;
developerfd40db22021-04-29 10:08:25 +08001137 u32 gmac = NR_DISCARD;
1138 int udp = 0;
1139 u32 qid = 0;
developeraf07fad2021-11-19 17:53:42 +08001140 u32 port_id = 0;
developerfd40db22021-04-29 10:08:25 +08001141 int mape = 0;
1142
developer5ffc5f12022-10-25 18:51:46 +08001143 ct = nf_ct_get(skb, &ctinfo);
1144
developerfd40db22021-04-29 10:08:25 +08001145 if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
1146 /* point to ethernet header for DS-Lite and MapE */
1147 eth = (struct ethhdr *)(skb->data - ETH_HLEN);
1148 else
1149 eth = eth_hdr(skb);
developer8116b0a2021-08-23 18:07:20 +08001150
1151 /*do not bind multicast if PPE mcast not enable*/
1152 if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest))
1153 return 0;
developerfd40db22021-04-29 10:08:25 +08001154
1155 entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
developerf94d8862022-03-29 10:11:17 +08001156 entry.bfib1.state = foe->udib1.state;
1157
developerd35bbcc2022-09-28 22:46:01 +08001158#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08001159 entry.bfib1.sp = foe->udib1.sp;
1160#endif
1161
1162 switch (ntohs(eth->h_proto)) {
1163 case ETH_P_IP:
1164 iph = ip_hdr(skb);
1165 switch (iph->protocol) {
1166 case IPPROTO_UDP:
1167 udp = 1;
1168 /* fallthrough */
1169 case IPPROTO_TCP:
1170 entry.ipv4_hnapt.etype = htons(ETH_P_IP);
1171
1172 /* DS-Lite WAN->LAN */
1173 if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
1174 entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
1175 entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
1176 entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
1177 entry.ipv4_dslite.sport =
1178 foe->ipv4_dslite.sport;
1179 entry.ipv4_dslite.dport =
1180 foe->ipv4_dslite.dport;
1181
developerd35bbcc2022-09-28 22:46:01 +08001182#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08001183 if (entry.bfib1.pkt_type == IPV4_MAP_E) {
1184 pptr = skb_header_pointer(skb,
1185 iph->ihl * 4,
1186 sizeof(_ports),
1187 &_ports);
developer4c32b7a2021-11-13 16:46:43 +08001188 if (unlikely(!pptr))
1189 return -1;
developerfd40db22021-04-29 10:08:25 +08001190
developerd35bbcc2022-09-28 22:46:01 +08001191 entry.ipv4_mape.new_sip =
developerfd40db22021-04-29 10:08:25 +08001192 ntohl(iph->saddr);
developerd35bbcc2022-09-28 22:46:01 +08001193 entry.ipv4_mape.new_dip =
developerfd40db22021-04-29 10:08:25 +08001194 ntohl(iph->daddr);
developerd35bbcc2022-09-28 22:46:01 +08001195 entry.ipv4_mape.new_sport =
developerfd40db22021-04-29 10:08:25 +08001196 ntohs(pptr->src);
developerd35bbcc2022-09-28 22:46:01 +08001197 entry.ipv4_mape.new_dport =
developerfd40db22021-04-29 10:08:25 +08001198 ntohs(pptr->dst);
1199 }
1200#endif
1201
1202 entry.ipv4_dslite.tunnel_sipv6_0 =
1203 foe->ipv4_dslite.tunnel_sipv6_0;
1204 entry.ipv4_dslite.tunnel_sipv6_1 =
1205 foe->ipv4_dslite.tunnel_sipv6_1;
1206 entry.ipv4_dslite.tunnel_sipv6_2 =
1207 foe->ipv4_dslite.tunnel_sipv6_2;
1208 entry.ipv4_dslite.tunnel_sipv6_3 =
1209 foe->ipv4_dslite.tunnel_sipv6_3;
1210
1211 entry.ipv4_dslite.tunnel_dipv6_0 =
1212 foe->ipv4_dslite.tunnel_dipv6_0;
1213 entry.ipv4_dslite.tunnel_dipv6_1 =
1214 foe->ipv4_dslite.tunnel_dipv6_1;
1215 entry.ipv4_dslite.tunnel_dipv6_2 =
1216 foe->ipv4_dslite.tunnel_dipv6_2;
1217 entry.ipv4_dslite.tunnel_dipv6_3 =
1218 foe->ipv4_dslite.tunnel_dipv6_3;
1219
1220 entry.ipv4_dslite.bfib1.rmt = 1;
1221 entry.ipv4_dslite.iblk2.dscp = iph->tos;
1222 entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
1223 if (hnat_priv->data->per_flow_accounting)
1224 entry.ipv4_dslite.iblk2.mibf = 1;
1225
1226 } else {
1227 entry.ipv4_hnapt.iblk2.dscp = iph->tos;
1228 if (hnat_priv->data->per_flow_accounting)
1229 entry.ipv4_hnapt.iblk2.mibf = 1;
1230
1231 entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;
1232
developerdfc8ef52022-12-06 14:00:09 +08001233 if (skb_vlan_tag_present(skb)) {
developerfd40db22021-04-29 10:08:25 +08001234 entry.bfib1.vlan_layer += 1;
1235
1236 if (entry.ipv4_hnapt.vlan1)
developerdfc8ef52022-12-06 14:00:09 +08001237 entry.ipv4_hnapt.vlan2 =
1238 skb->vlan_tci;
developerfd40db22021-04-29 10:08:25 +08001239 else
developerdfc8ef52022-12-06 14:00:09 +08001240 entry.ipv4_hnapt.vlan1 =
1241 skb->vlan_tci;
1242 }
developerfd40db22021-04-29 10:08:25 +08001243
1244 entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
1245 entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
1246 entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
1247 entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
1248
1249 entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
1250 entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
1251 }
1252
1253 entry.ipv4_hnapt.bfib1.udp = udp;
1254 if (IS_IPV4_HNAPT(foe)) {
1255 pptr = skb_header_pointer(skb, iph->ihl * 4,
1256 sizeof(_ports),
1257 &_ports);
developer4c32b7a2021-11-13 16:46:43 +08001258 if (unlikely(!pptr))
1259 return -1;
1260
developerfd40db22021-04-29 10:08:25 +08001261 entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
1262 entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
1263 }
1264
1265 break;
1266
1267 default:
1268 return -1;
1269 }
1270 trace_printk(
1271 "[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
1272 __func__, skb->head, skb->data, iph, skb->len,
1273 skb->data_len);
1274 break;
1275
1276 case ETH_P_IPV6:
1277 ip6h = ipv6_hdr(skb);
1278 switch (ip6h->nexthdr) {
1279 case NEXTHDR_UDP:
1280 udp = 1;
1281 /* fallthrough */
1282 case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
1283 entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);
1284
1285 entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;
1286
developerdfc8ef52022-12-06 14:00:09 +08001287 if (skb_vlan_tag_present(skb)) {
developerfd40db22021-04-29 10:08:25 +08001288 entry.bfib1.vlan_layer += 1;
1289
1290 if (entry.ipv6_5t_route.vlan1)
developerdfc8ef52022-12-06 14:00:09 +08001291 entry.ipv6_5t_route.vlan2 =
1292 skb->vlan_tci;
developerfd40db22021-04-29 10:08:25 +08001293 else
developerdfc8ef52022-12-06 14:00:09 +08001294 entry.ipv6_5t_route.vlan1 =
1295 skb->vlan_tci;
developerfd40db22021-04-29 10:08:25 +08001296 }
1297
1298 if (hnat_priv->data->per_flow_accounting)
1299 entry.ipv6_5t_route.iblk2.mibf = 1;
1300 entry.ipv6_5t_route.bfib1.udp = udp;
1301
1302 if (IS_IPV6_6RD(foe)) {
1303 entry.ipv6_5t_route.bfib1.rmt = 1;
1304 entry.ipv6_6rd.tunnel_sipv4 =
1305 foe->ipv6_6rd.tunnel_sipv4;
1306 entry.ipv6_6rd.tunnel_dipv4 =
1307 foe->ipv6_6rd.tunnel_dipv4;
1308 }
1309
1310 entry.ipv6_3t_route.ipv6_sip0 =
1311 foe->ipv6_3t_route.ipv6_sip0;
1312 entry.ipv6_3t_route.ipv6_sip1 =
1313 foe->ipv6_3t_route.ipv6_sip1;
1314 entry.ipv6_3t_route.ipv6_sip2 =
1315 foe->ipv6_3t_route.ipv6_sip2;
1316 entry.ipv6_3t_route.ipv6_sip3 =
1317 foe->ipv6_3t_route.ipv6_sip3;
1318
1319 entry.ipv6_3t_route.ipv6_dip0 =
1320 foe->ipv6_3t_route.ipv6_dip0;
1321 entry.ipv6_3t_route.ipv6_dip1 =
1322 foe->ipv6_3t_route.ipv6_dip1;
1323 entry.ipv6_3t_route.ipv6_dip2 =
1324 foe->ipv6_3t_route.ipv6_dip2;
1325 entry.ipv6_3t_route.ipv6_dip3 =
1326 foe->ipv6_3t_route.ipv6_dip3;
1327
developer729f0272021-06-09 17:28:38 +08001328 if (IS_IPV6_3T_ROUTE(foe)) {
1329 entry.ipv6_3t_route.prot =
1330 foe->ipv6_3t_route.prot;
1331 entry.ipv6_3t_route.hph =
1332 foe->ipv6_3t_route.hph;
1333 }
1334
developerfd40db22021-04-29 10:08:25 +08001335 if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
1336 entry.ipv6_5t_route.sport =
1337 foe->ipv6_5t_route.sport;
1338 entry.ipv6_5t_route.dport =
1339 foe->ipv6_5t_route.dport;
1340 }
developer5ffc5f12022-10-25 18:51:46 +08001341
1342#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1343 if (ct && (ct->status & IPS_SRC_NAT)) {
1344 entry.bfib1.pkt_type = IPV6_HNAPT;
1345
1346 if (IS_WAN(dev) || IS_DSA_WAN(dev)) {
1347 entry.ipv6_hnapt.eg_ipv6_dir =
1348 IPV6_SNAT;
1349 entry.ipv6_hnapt.new_ipv6_ip0 =
1350 ntohl(ip6h->saddr.s6_addr32[0]);
1351 entry.ipv6_hnapt.new_ipv6_ip1 =
1352 ntohl(ip6h->saddr.s6_addr32[1]);
1353 entry.ipv6_hnapt.new_ipv6_ip2 =
1354 ntohl(ip6h->saddr.s6_addr32[2]);
1355 entry.ipv6_hnapt.new_ipv6_ip3 =
1356 ntohl(ip6h->saddr.s6_addr32[3]);
1357 } else {
1358 entry.ipv6_hnapt.eg_ipv6_dir =
1359 IPV6_DNAT;
1360 entry.ipv6_hnapt.new_ipv6_ip0 =
1361 ntohl(ip6h->daddr.s6_addr32[0]);
1362 entry.ipv6_hnapt.new_ipv6_ip1 =
1363 ntohl(ip6h->daddr.s6_addr32[1]);
1364 entry.ipv6_hnapt.new_ipv6_ip2 =
1365 ntohl(ip6h->daddr.s6_addr32[2]);
1366 entry.ipv6_hnapt.new_ipv6_ip3 =
1367 ntohl(ip6h->daddr.s6_addr32[3]);
1368 }
1369
1370 pptr = skb_header_pointer(skb, IPV6_HDR_LEN,
1371 sizeof(_ports),
1372 &_ports);
1373 if (unlikely(!pptr))
1374 return -1;
1375
1376 entry.ipv6_hnapt.new_sport = ntohs(pptr->src);
1377 entry.ipv6_hnapt.new_dport = ntohs(pptr->dst);
1378 }
1379#endif
1380
developerfd40db22021-04-29 10:08:25 +08001381 entry.ipv6_5t_route.iblk2.dscp =
1382 (ip6h->priority << 4 |
1383 (ip6h->flow_lbl[0] >> 4));
1384 break;
1385
1386 case NEXTHDR_IPIP:
1387 if ((!mape_toggle &&
1388 entry.bfib1.pkt_type == IPV4_DSLITE) ||
1389 (mape_toggle &&
1390 entry.bfib1.pkt_type == IPV4_MAP_E)) {
1391 /* DS-Lite LAN->WAN */
1392 entry.ipv4_dslite.bfib1.udp =
1393 foe->ipv4_dslite.bfib1.udp;
1394 entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
1395 entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
1396 entry.ipv4_dslite.sport =
1397 foe->ipv4_dslite.sport;
1398 entry.ipv4_dslite.dport =
1399 foe->ipv4_dslite.dport;
1400
1401 entry.ipv4_dslite.tunnel_sipv6_0 =
1402 ntohl(ip6h->saddr.s6_addr32[0]);
1403 entry.ipv4_dslite.tunnel_sipv6_1 =
1404 ntohl(ip6h->saddr.s6_addr32[1]);
1405 entry.ipv4_dslite.tunnel_sipv6_2 =
1406 ntohl(ip6h->saddr.s6_addr32[2]);
1407 entry.ipv4_dslite.tunnel_sipv6_3 =
1408 ntohl(ip6h->saddr.s6_addr32[3]);
1409
1410 entry.ipv4_dslite.tunnel_dipv6_0 =
1411 ntohl(ip6h->daddr.s6_addr32[0]);
1412 entry.ipv4_dslite.tunnel_dipv6_1 =
1413 ntohl(ip6h->daddr.s6_addr32[1]);
1414 entry.ipv4_dslite.tunnel_dipv6_2 =
1415 ntohl(ip6h->daddr.s6_addr32[2]);
1416 entry.ipv4_dslite.tunnel_dipv6_3 =
1417 ntohl(ip6h->daddr.s6_addr32[3]);
1418
1419 ppe_fill_flow_lbl(&entry, ip6h);
1420
1421 entry.ipv4_dslite.priority = ip6h->priority;
1422 entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
1423 entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
1424 if (hnat_priv->data->per_flow_accounting)
1425 entry.ipv4_dslite.iblk2.mibf = 1;
developer25fc8c02022-05-06 16:24:02 +08001426 /* Map-E LAN->WAN record inner IPv4 header info. */
developer8c707df2022-10-24 14:09:00 +08001427#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developer25fc8c02022-05-06 16:24:02 +08001428 if (mape_toggle) {
1429 entry.ipv4_dslite.iblk2.dscp = foe->ipv4_dslite.iblk2.dscp;
developerd35bbcc2022-09-28 22:46:01 +08001430 entry.ipv4_mape.new_sip = foe->ipv4_mape.new_sip;
1431 entry.ipv4_mape.new_dip = foe->ipv4_mape.new_dip;
1432 entry.ipv4_mape.new_sport = foe->ipv4_mape.new_sport;
1433 entry.ipv4_mape.new_dport = foe->ipv4_mape.new_dport;
developer25fc8c02022-05-06 16:24:02 +08001434 }
1435#endif
developerfd40db22021-04-29 10:08:25 +08001436 } else if (mape_toggle &&
1437 entry.bfib1.pkt_type == IPV4_HNAPT) {
1438 /* MapE LAN -> WAN */
1439 mape = 1;
1440 entry.ipv4_hnapt.iblk2.dscp =
1441 foe->ipv4_hnapt.iblk2.dscp;
1442 if (hnat_priv->data->per_flow_accounting)
1443 entry.ipv4_hnapt.iblk2.mibf = 1;
1444
developerbb816412021-06-11 15:43:44 +08001445 if (IS_GMAC1_MODE)
1446 entry.ipv4_hnapt.vlan1 = 1;
1447 else
1448 entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;
developerfd40db22021-04-29 10:08:25 +08001449
1450 entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
1451 entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
1452 entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
1453 entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
1454
1455 entry.ipv4_hnapt.new_sip =
1456 foe->ipv4_hnapt.new_sip;
1457 entry.ipv4_hnapt.new_dip =
1458 foe->ipv4_hnapt.new_dip;
1459 entry.ipv4_hnapt.etype = htons(ETH_P_IP);
1460
developer34028fb2022-01-11 13:51:29 +08001461 if (IS_HQOS_MODE) {
developeraf07fad2021-11-19 17:53:42 +08001462 entry.ipv4_hnapt.iblk2.qid =
developer4164cfe2022-12-01 11:27:41 +08001463 (hnat_priv->data->version ==
1464 MTK_HNAT_V2 ||
1465 hnat_priv->data->version ==
1466 MTK_HNAT_V3) ?
developeraf07fad2021-11-19 17:53:42 +08001467 skb->mark & 0x7f : skb->mark & 0xf;
developerd35bbcc2022-09-28 22:46:01 +08001468#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer934756a2022-11-18 14:51:34 +08001469 if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
developer493adc32022-11-29 22:34:18 +08001470 (IS_HQOS_DL_MODE &&
1471 IS_LAN_GRP(dev)) ||
developer934756a2022-11-18 14:51:34 +08001472 (IS_PPPQ_MODE &&
1473 IS_PPPQ_PATH(dev, skb)))
developer47545a32022-11-15 16:06:58 +08001474 entry.ipv4_hnapt.tport_id = 1;
1475 else
1476 entry.ipv4_hnapt.tport_id = 0;
developerd35bbcc2022-09-28 22:46:01 +08001477#else
developeraf07fad2021-11-19 17:53:42 +08001478 entry.ipv4_hnapt.iblk2.fqos = 1;
developerd35bbcc2022-09-28 22:46:01 +08001479#endif
developeraf07fad2021-11-19 17:53:42 +08001480 }
developerfd40db22021-04-29 10:08:25 +08001481
1482 entry.ipv4_hnapt.bfib1.udp =
1483 foe->ipv4_hnapt.bfib1.udp;
1484
1485 entry.ipv4_hnapt.new_sport =
1486 foe->ipv4_hnapt.new_sport;
1487 entry.ipv4_hnapt.new_dport =
1488 foe->ipv4_hnapt.new_dport;
1489 mape_l2w_v6h = *ip6h;
1490 }
1491 break;
1492
1493 default:
1494 return -1;
1495 }
1496
1497 trace_printk(
1498 "[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
1499 __func__, skb->head, skb->data, ip6h, skb->len,
1500 skb->data_len);
1501 break;
1502
1503 default:
developerfd40db22021-04-29 10:08:25 +08001504 iph = ip_hdr(skb);
1505 switch (entry.bfib1.pkt_type) {
1506 case IPV6_6RD: /* 6RD LAN->WAN */
1507 entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
1508 entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
1509 entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
1510 entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
1511
1512 entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
1513 entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
1514 entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
1515 entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
1516
1517 entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
1518 entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
1519 entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
1520 entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
1521 entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
1522 entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
1523 entry.ipv6_6rd.ttl = iph->ttl;
1524 entry.ipv6_6rd.dscp = iph->tos;
1525 entry.ipv6_6rd.per_flow_6rd_id = 1;
1526 entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
1527 if (hnat_priv->data->per_flow_accounting)
1528 entry.ipv6_6rd.iblk2.mibf = 1;
1529 break;
1530
1531 default:
1532 return -1;
1533 }
1534 }
1535
1536 /* Fill Layer2 Info.*/
1537 entry = ppe_fill_L2_info(eth, entry, hw_path);
1538
1539 /* Fill Info Blk*/
1540 entry = ppe_fill_info_blk(eth, entry, hw_path);
1541
1542 if (IS_LAN(dev)) {
1543 if (IS_DSA_LAN(dev))
developeraf07fad2021-11-19 17:53:42 +08001544 port_id = hnat_dsa_fill_stag(dev, &entry, hw_path,
1545 ntohs(eth->h_proto),
1546 mape);
developerfd40db22021-04-29 10:08:25 +08001547
1548 if (IS_BOND_MODE)
1549 gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
1550 NR_GMAC2_PORT : NR_GMAC1_PORT;
1551 else
1552 gmac = NR_GMAC1_PORT;
developerd35bbcc2022-09-28 22:46:01 +08001553 } else if (IS_LAN2(dev)) {
1554 gmac = NR_GMAC3_PORT;
developerfd40db22021-04-29 10:08:25 +08001555 } else if (IS_WAN(dev)) {
1556 if (IS_DSA_WAN(dev))
developeraf07fad2021-11-19 17:53:42 +08001557 port_id = hnat_dsa_fill_stag(dev,&entry, hw_path,
1558 ntohs(eth->h_proto),
1559 mape);
developerfd40db22021-04-29 10:08:25 +08001560 if (mape_toggle && mape == 1) {
1561 gmac = NR_PDMA_PORT;
1562 /* Set act_dp = wan_dev */
1563 entry.ipv4_hnapt.act_dp = dev->ifindex;
1564 } else {
1565 gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
1566 }
developerd35bbcc2022-09-28 22:46:01 +08001567 } else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN_GRP(skb) ||
developer99506e52021-06-30 22:03:02 +08001568 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
developerfd40db22021-04-29 10:08:25 +08001569 if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
1570 entry.bfib1.vpm = 1;
1571 entry.bfib1.vlan_layer = 1;
1572
1573 if (FROM_GE_LAN(skb))
1574 entry.ipv4_hnapt.vlan1 = 1;
1575 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1576 entry.ipv4_hnapt.vlan1 = 2;
1577 }
1578
1579 trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
1580 skb_hnat_iface(skb), dev->name);
1581 /* To CPU then stolen by pre-routing hant hook of LAN/WAN
1582 * Current setting is PDMA RX.
1583 */
1584 gmac = NR_PDMA_PORT;
1585 if (IS_IPV4_GRP(foe))
1586 entry.ipv4_hnapt.act_dp = dev->ifindex;
1587 else
1588 entry.ipv6_5t_route.act_dp = dev->ifindex;
1589 } else {
1590 printk_ratelimited(KERN_WARNING
1591 "Unknown case of dp, iif=%x --> %s\n",
1592 skb_hnat_iface(skb), dev->name);
1593
1594 return 0;
1595 }
1596
developerafff5662022-06-29 10:09:56 +08001597 if (IS_HQOS_MODE || skb->mark >= MAX_PPPQ_PORT_NUM)
developeraf07fad2021-11-19 17:53:42 +08001598 qid = skb->mark & (MTK_QDMA_TX_MASK);
developer934756a2022-11-18 14:51:34 +08001599 else if (IS_PPPQ_MODE && IS_PPPQ_PATH(dev, skb))
developeraf07fad2021-11-19 17:53:42 +08001600 qid = port_id & MTK_QDMA_TX_MASK;
1601 else
1602 qid = 0;
developerfd40db22021-04-29 10:08:25 +08001603
1604 if (IS_IPV4_GRP(foe)) {
1605 entry.ipv4_hnapt.iblk2.dp = gmac;
1606 entry.ipv4_hnapt.iblk2.port_mg =
developer4164cfe2022-12-01 11:27:41 +08001607 (hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;
developer24948202021-11-24 17:38:27 +08001608
developeraf07fad2021-11-19 17:53:42 +08001609 if (qos_toggle) {
developer4164cfe2022-12-01 11:27:41 +08001610 if (hnat_priv->data->version == MTK_HNAT_V2 ||
1611 hnat_priv->data->version == MTK_HNAT_V3) {
developeraf07fad2021-11-19 17:53:42 +08001612 entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
1613 } else {
1614 /* qid[5:0]= port_mg[1:0]+ qid[3:0] */
1615 entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
developer4164cfe2022-12-01 11:27:41 +08001616 if (hnat_priv->data->version != MTK_HNAT_V1_1)
developeraf07fad2021-11-19 17:53:42 +08001617 entry.ipv4_hnapt.iblk2.port_mg |=
1618 ((qid >> 4) & 0x3);
developerfd40db22021-04-29 10:08:25 +08001619
developerd35bbcc2022-09-28 22:46:01 +08001620 if (((IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
developeraf07fad2021-11-19 17:53:42 +08001621 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
1622 ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
1623 (!whnat)) {
1624 entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
1625 entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
1626 entry.bfib1.vlan_layer = 1;
1627 }
developerfd40db22021-04-29 10:08:25 +08001628 }
developerfd40db22021-04-29 10:08:25 +08001629
developer34028fb2022-01-11 13:51:29 +08001630 if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT ||
1631 (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
developeraf07fad2021-11-19 17:53:42 +08001632 entry.ipv4_hnapt.iblk2.fqos = 0;
1633 else
developerd35bbcc2022-09-28 22:46:01 +08001634#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer934756a2022-11-18 14:51:34 +08001635 if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
developer493adc32022-11-29 22:34:18 +08001636 (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
developer934756a2022-11-18 14:51:34 +08001637 (IS_PPPQ_MODE &&
1638 IS_PPPQ_PATH(dev, skb)))
developer47545a32022-11-15 16:06:58 +08001639 entry.ipv4_hnapt.tport_id = 1;
1640 else
1641 entry.ipv4_hnapt.tport_id = 0;
developerd35bbcc2022-09-28 22:46:01 +08001642#else
developer399ec072022-06-24 16:07:41 +08001643 entry.ipv4_hnapt.iblk2.fqos =
developer934756a2022-11-18 14:51:34 +08001644 (!IS_PPPQ_MODE ||
1645 (IS_PPPQ_MODE &&
1646 IS_PPPQ_PATH(dev, skb)));
developerd35bbcc2022-09-28 22:46:01 +08001647#endif
developeraf07fad2021-11-19 17:53:42 +08001648 } else {
developerfd40db22021-04-29 10:08:25 +08001649 entry.ipv4_hnapt.iblk2.fqos = 0;
developeraf07fad2021-11-19 17:53:42 +08001650 }
developerfd40db22021-04-29 10:08:25 +08001651 } else {
1652 entry.ipv6_5t_route.iblk2.dp = gmac;
1653 entry.ipv6_5t_route.iblk2.port_mg =
developer4164cfe2022-12-01 11:27:41 +08001654 (hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;
developer24948202021-11-24 17:38:27 +08001655
developeraf07fad2021-11-19 17:53:42 +08001656 if (qos_toggle) {
developer4164cfe2022-12-01 11:27:41 +08001657 if (hnat_priv->data->version == MTK_HNAT_V2 ||
1658 hnat_priv->data->version == MTK_HNAT_V3) {
developeraf07fad2021-11-19 17:53:42 +08001659 entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
1660 } else {
 1661				/* qid[5:0] = port_mg[1:0] + qid[3:0] */
1662 entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
developer4164cfe2022-12-01 11:27:41 +08001663 if (hnat_priv->data->version != MTK_HNAT_V1_1)
developeraf07fad2021-11-19 17:53:42 +08001664 entry.ipv6_5t_route.iblk2.port_mg |=
1665 ((qid >> 4) & 0x3);
developerfd40db22021-04-29 10:08:25 +08001666
developerd35bbcc2022-09-28 22:46:01 +08001667 if (IS_EXT(dev) && (FROM_GE_LAN_GRP(skb) ||
developeraf07fad2021-11-19 17:53:42 +08001668 FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
1669 (!whnat)) {
1670 entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
1671 entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
1672 entry.bfib1.vlan_layer = 1;
1673 }
developerfd40db22021-04-29 10:08:25 +08001674 }
developerfd40db22021-04-29 10:08:25 +08001675
developer34028fb2022-01-11 13:51:29 +08001676 if (FROM_EXT(skb) ||
1677 (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
developeraf07fad2021-11-19 17:53:42 +08001678 entry.ipv6_5t_route.iblk2.fqos = 0;
1679 else
developerd35bbcc2022-09-28 22:46:01 +08001680#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer934756a2022-11-18 14:51:34 +08001681 if ((IS_HQOS_UL_MODE && IS_WAN(dev)) ||
developer493adc32022-11-29 22:34:18 +08001682 (IS_HQOS_DL_MODE && IS_LAN_GRP(dev)) ||
developer934756a2022-11-18 14:51:34 +08001683 (IS_PPPQ_MODE &&
1684 IS_PPPQ_PATH(dev, skb)))
developer47545a32022-11-15 16:06:58 +08001685 entry.ipv6_5t_route.tport_id = 1;
1686 else
1687 entry.ipv6_5t_route.tport_id = 0;
developerd35bbcc2022-09-28 22:46:01 +08001688#else
developer399ec072022-06-24 16:07:41 +08001689 entry.ipv6_5t_route.iblk2.fqos =
developer934756a2022-11-18 14:51:34 +08001690 (!IS_PPPQ_MODE ||
1691 (IS_PPPQ_MODE &&
1692 IS_PPPQ_PATH(dev, skb)));
developerd35bbcc2022-09-28 22:46:01 +08001693#endif
developeraf07fad2021-11-19 17:53:42 +08001694 } else {
developerfd40db22021-04-29 10:08:25 +08001695 entry.ipv6_5t_route.iblk2.fqos = 0;
developeraf07fad2021-11-19 17:53:42 +08001696 }
developerfd40db22021-04-29 10:08:25 +08001697 }
1698
developer60e60962021-06-15 21:05:07 +08001699 /* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined
1700 * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and
1701 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
1702 */
developer7b36dca2022-05-19 18:29:10 +08001703 if (!whnat) {
1704 entry.bfib1.ttl = 1;
developer60e60962021-06-15 21:05:07 +08001705 entry.bfib1.state = BIND;
developer7b36dca2022-05-19 18:29:10 +08001706 }
developer60e60962021-06-15 21:05:07 +08001707
developerbc552cc2022-03-15 16:19:27 +08001708 wmb();
developerfd40db22021-04-29 10:08:25 +08001709 memcpy(foe, &entry, sizeof(entry));
 1710	/* reset statistics for this entry */
developer577ad2f2022-11-28 10:33:36 +08001711 if (hnat_priv->data->per_flow_accounting &&
1712 skb_hnat_entry(skb) < hnat_priv->foe_etry_num &&
1713 skb_hnat_ppe(skb) < CFG_PPE_NUM)
developer471f6562021-05-10 20:48:34 +08001714 memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
1715 0, sizeof(struct mib_entry));
developerfd40db22021-04-29 10:08:25 +08001716
developerfdfe1572021-09-13 16:56:33 +08001717 skb_hnat_filled(skb) = HNAT_INFO_FILLED;
developerfd40db22021-04-29 10:08:25 +08001718
1719 return 0;
1720}
1721
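/* TX-side binding hook: once a packet for a hashed, still-unbound entry
 * reaches HIT_UNBIND_RATE_REACH, refresh the source MAC from the outgoing
 * frame, fill in the VLAN and Wi-Fi (winfo/PAO) fields for the given
 * gmac_no, and switch the entry to BIND state.
 */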
1722int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
1723{
1724 struct foe_entry *entry;
1725 struct ethhdr *eth;
developerbc552cc2022-03-15 16:19:27 +08001726 struct hnat_bind_info_blk bfib1_tx;
developerfd40db22021-04-29 10:08:25 +08001727
developerfdfe1572021-09-13 16:56:33 +08001728 if (skb_hnat_alg(skb) || !is_hnat_info_filled(skb) ||
1729 !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
developerfd40db22021-04-29 10:08:25 +08001730 return NF_ACCEPT;
1731
1732 trace_printk(
1733 "[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
1734 __func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
 1735		skb_hnat_wdma_id(skb), skb_hnat_rx_id(skb),
 1736		skb_hnat_wc_id(skb), skb_hnat_bss_id(skb));
1737
developer99506e52021-06-30 22:03:02 +08001738 if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) &&
1739 (gmac_no != NR_WHNAT_WDMA_PORT))
1740 return NF_ACCEPT;
1741
developerc0419aa2022-12-07 15:56:36 +08001742 if (unlikely(!skb_mac_header_was_set(skb)))
1743 return NF_ACCEPT;
1744
developerfd40db22021-04-29 10:08:25 +08001745 if (!skb_hnat_is_hashed(skb))
1746 return NF_ACCEPT;
1747
developer955a6f62021-07-26 10:54:39 +08001748 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
1749 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
1750 return NF_ACCEPT;
1751
developer471f6562021-05-10 20:48:34 +08001752 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08001753 if (entry_hnat_is_bound(entry))
1754 return NF_ACCEPT;
1755
1756 if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
1757 return NF_ACCEPT;
1758
1759 eth = eth_hdr(skb);
developerbc552cc2022-03-15 16:19:27 +08001760 memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
developer8116b0a2021-08-23 18:07:20 +08001761
 1762	/* do not bind multicast if PPE mcast is not enabled */
developerfdfe1572021-09-13 16:56:33 +08001763 if (!hnat_priv->data->mcast) {
1764 if (is_multicast_ether_addr(eth->h_dest))
1765 return NF_ACCEPT;
1766
1767 if (IS_IPV4_GRP(entry))
1768 entry->ipv4_hnapt.iblk2.mcast = 0;
1769 else
1770 entry->ipv6_5t_route.iblk2.mcast = 0;
1771 }
developerfd40db22021-04-29 10:08:25 +08001772
1773 /* Some mt_wifi virtual interfaces, such as apcli,
 1774	 * will change the smac for special purposes.
1775 */
developer5ffc5f12022-10-25 18:51:46 +08001776 switch ((int)bfib1_tx.pkt_type) {
developerfd40db22021-04-29 10:08:25 +08001777 case IPV4_HNAPT:
1778 case IPV4_HNAT:
1779 entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
1780 entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1781 break;
1782 case IPV4_DSLITE:
1783 case IPV4_MAP_E:
1784 case IPV6_6RD:
1785 case IPV6_5T_ROUTE:
1786 case IPV6_3T_ROUTE:
developer5ffc5f12022-10-25 18:51:46 +08001787 case IPV6_HNAPT:
1788 case IPV6_HNAT:
developerfd40db22021-04-29 10:08:25 +08001789 entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
1790 entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
1791 break;
1792 }
1793
developer0ff76882021-10-26 10:54:13 +08001794 if (skb->vlan_tci) {
developerbc552cc2022-03-15 16:19:27 +08001795 bfib1_tx.vlan_layer = 1;
1796 bfib1_tx.vpm = 1;
developer0ff76882021-10-26 10:54:13 +08001797 if (IS_IPV4_GRP(entry)) {
1798 entry->ipv4_hnapt.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001799 entry->ipv4_hnapt.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001800 } else if (IS_IPV6_GRP(entry)) {
1801 entry->ipv6_5t_route.etype = htons(ETH_P_8021Q);
developer00a07372022-03-11 16:04:34 +08001802 entry->ipv6_5t_route.vlan1 = skb->vlan_tci;
developer0ff76882021-10-26 10:54:13 +08001803 }
1804 } else {
developerbc552cc2022-03-15 16:19:27 +08001805 bfib1_tx.vpm = 0;
1806 bfib1_tx.vlan_layer = 0;
developer0ff76882021-10-26 10:54:13 +08001807 }
developer60e60962021-06-15 21:05:07 +08001808
developerfd40db22021-04-29 10:08:25 +08001809 /* MT7622 wifi hw_nat not support QoS */
1810 if (IS_IPV4_GRP(entry)) {
1811 entry->ipv4_hnapt.iblk2.fqos = 0;
developer4164cfe2022-12-01 11:27:41 +08001812 if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
developere567ad32021-05-25 17:16:17 +08001813 gmac_no == NR_WHNAT_WDMA_PORT) ||
developer4164cfe2022-12-01 11:27:41 +08001814 ((hnat_priv->data->version == MTK_HNAT_V2 ||
1815 hnat_priv->data->version == MTK_HNAT_V3) &&
developere567ad32021-05-25 17:16:17 +08001816 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001817 entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1818 entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001819#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer47545a32022-11-15 16:06:58 +08001820 entry->ipv4_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developerd35bbcc2022-09-28 22:46:01 +08001821 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1822 entry->ipv4_hnapt.iblk2.winfoi = 1;
1823 entry->ipv4_hnapt.winfo_pao.usr_info =
1824 skb_hnat_usr_info(skb);
1825 entry->ipv4_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1826 entry->ipv4_hnapt.winfo_pao.is_fixedrate =
1827 skb_hnat_is_fixedrate(skb);
1828 entry->ipv4_hnapt.winfo_pao.is_prior =
1829 skb_hnat_is_prior(skb);
1830 entry->ipv4_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1831 entry->ipv4_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1832 entry->ipv4_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
1833#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
developerfd40db22021-04-29 10:08:25 +08001834 entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1835 entry->ipv4_hnapt.iblk2.winfoi = 1;
1836#else
1837 entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
1838 entry->ipv4_hnapt.iblk2w.winfoi = 1;
1839 entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1840#endif
1841 } else {
1842 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001843 bfib1_tx.vpm = 1;
1844 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001845
developerd35bbcc2022-09-28 22:46:01 +08001846 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001847 entry->ipv4_hnapt.vlan1 = 1;
1848 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1849 entry->ipv4_hnapt.vlan1 = 2;
1850 }
1851
developer34028fb2022-01-11 13:51:29 +08001852 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001853 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001854 bfib1_tx.vpm = 0;
1855 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001856 entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
1857 entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
1858 entry->ipv4_hnapt.iblk2.fqos = 1;
1859 }
developerfd40db22021-04-29 10:08:25 +08001860 }
1861 entry->ipv4_hnapt.iblk2.dp = gmac_no;
developer5ffc5f12022-10-25 18:51:46 +08001862#if defined(CONFIG_MEDIATEK_NETSYS_V3)
1863 } else if (IS_IPV6_HNAPT(entry) || IS_IPV6_HNAT(entry)) {
1864 entry->ipv6_hnapt.iblk2.dp = gmac_no;
1865 entry->ipv6_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
1866 entry->ipv6_hnapt.iblk2.winfoi = 1;
1867
1868 entry->ipv6_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
1869 entry->ipv6_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
1870 entry->ipv6_hnapt.winfo_pao.usr_info = skb_hnat_usr_info(skb);
1871 entry->ipv6_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
1872 entry->ipv6_hnapt.winfo_pao.is_fixedrate =
1873 skb_hnat_is_fixedrate(skb);
1874 entry->ipv6_hnapt.winfo_pao.is_prior = skb_hnat_is_prior(skb);
1875 entry->ipv6_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
1876 entry->ipv6_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
1877 entry->ipv6_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
developer47545a32022-11-15 16:06:58 +08001878 entry->ipv6_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developer5ffc5f12022-10-25 18:51:46 +08001879#endif
developerfd40db22021-04-29 10:08:25 +08001880 } else {
1881 entry->ipv6_5t_route.iblk2.fqos = 0;
developer4164cfe2022-12-01 11:27:41 +08001882 if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
developere567ad32021-05-25 17:16:17 +08001883 gmac_no == NR_WHNAT_WDMA_PORT) ||
developer4164cfe2022-12-01 11:27:41 +08001884 ((hnat_priv->data->version == MTK_HNAT_V2 ||
1885 hnat_priv->data->version == MTK_HNAT_V3) &&
developere567ad32021-05-25 17:16:17 +08001886 (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
developerfd40db22021-04-29 10:08:25 +08001887 entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
1888 entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
developerd35bbcc2022-09-28 22:46:01 +08001889#if defined(CONFIG_MEDIATEK_NETSYS_V3)
developer47545a32022-11-15 16:06:58 +08001890 entry->ipv6_5t_route.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
developerfd40db22021-04-29 10:08:25 +08001891 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1892 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerd35bbcc2022-09-28 22:46:01 +08001893 entry->ipv6_5t_route.winfo_pao.usr_info =
1894 skb_hnat_usr_info(skb);
1895 entry->ipv6_5t_route.winfo_pao.tid =
1896 skb_hnat_tid(skb);
1897 entry->ipv6_5t_route.winfo_pao.is_fixedrate =
1898 skb_hnat_is_fixedrate(skb);
1899 entry->ipv6_5t_route.winfo_pao.is_prior =
1900 skb_hnat_is_prior(skb);
1901 entry->ipv6_5t_route.winfo_pao.is_sp =
1902 skb_hnat_is_sp(skb);
1903 entry->ipv6_5t_route.winfo_pao.hf =
1904 skb_hnat_hf(skb);
1905 entry->ipv6_5t_route.winfo_pao.amsdu =
1906 skb_hnat_amsdu(skb);
1907#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
1908 entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
1909 entry->ipv6_5t_route.iblk2.winfoi = 1;
developerfd40db22021-04-29 10:08:25 +08001910#else
1911 entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
1912 entry->ipv6_5t_route.iblk2w.winfoi = 1;
1913 entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
1914#endif
1915 } else {
1916 if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
developerbc552cc2022-03-15 16:19:27 +08001917 bfib1_tx.vpm = 1;
1918 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001919
developerd35bbcc2022-09-28 22:46:01 +08001920 if (FROM_GE_LAN_GRP(skb))
developerfd40db22021-04-29 10:08:25 +08001921 entry->ipv6_5t_route.vlan1 = 1;
1922 else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
1923 entry->ipv6_5t_route.vlan1 = 2;
1924 }
1925
developer34028fb2022-01-11 13:51:29 +08001926 if (IS_HQOS_MODE &&
developerd35bbcc2022-09-28 22:46:01 +08001927 (FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
developerbc552cc2022-03-15 16:19:27 +08001928 bfib1_tx.vpm = 0;
1929 bfib1_tx.vlan_layer = 1;
developerfd40db22021-04-29 10:08:25 +08001930 entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
1931 entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
1932 entry->ipv6_5t_route.iblk2.fqos = 1;
1933 }
developerfd40db22021-04-29 10:08:25 +08001934 }
1935 entry->ipv6_5t_route.iblk2.dp = gmac_no;
1936 }
1937
developer7b36dca2022-05-19 18:29:10 +08001938 bfib1_tx.ttl = 1;
developerbc552cc2022-03-15 16:19:27 +08001939 bfib1_tx.state = BIND;
1940 wmb();
1941 memcpy(&entry->bfib1, &bfib1_tx, sizeof(bfib1_tx));
developerfd40db22021-04-29 10:08:25 +08001942
1943 return NF_ACCEPT;
1944}
1945
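/* RX-side hook for packets coming from WED: reset the per-skb HNAT
 * metadata and record the WDMA source port used for later learning.
 */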
1946int mtk_sw_nat_hook_rx(struct sk_buff *skb)
1947{
developer99506e52021-06-30 22:03:02 +08001948 if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
1949 skb_hnat_magic_tag(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001950 return NF_ACCEPT;
developer99506e52021-06-30 22:03:02 +08001951 }
developerfd40db22021-04-29 10:08:25 +08001952
1953 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001954 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001955 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1956
1957 if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
1958 skb_hnat_sport(skb) = NR_WDMA0_PORT;
1959 else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
1960 skb_hnat_sport(skb) = NR_WDMA1_PORT;
1961
1962 return NF_ACCEPT;
1963}
1964
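/* Register a hooked (e.g. Wi-Fi) net_device: remember it in wifi_hook_if[]
 * and, if it is not already known, add it to the external interface table.
 */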
1965void mtk_ppe_dev_register_hook(struct net_device *dev)
1966{
1967 int i, number = 0;
1968 struct extdev_entry *ext_entry;
1969
developerfd40db22021-04-29 10:08:25 +08001970 for (i = 1; i < MAX_IF_NUM; i++) {
1971 if (hnat_priv->wifi_hook_if[i] == dev) {
1972 pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
1973 __func__, dev->name, i);
1974 return;
1975 }
developera7e6c242022-12-05 13:52:40 +08001976 }
1977
1978 for (i = 1; i < MAX_IF_NUM; i++) {
developerfd40db22021-04-29 10:08:25 +08001979 if (!hnat_priv->wifi_hook_if[i]) {
1980 if (find_extif_from_devname(dev->name)) {
1981 extif_set_dev(dev);
1982 goto add_wifi_hook_if;
1983 }
1984
1985 number = get_ext_device_number();
1986 if (number >= MAX_EXT_DEVS) {
1987 pr_info("%s : extdev array is full. %s is not registered\n",
1988 __func__, dev->name);
1989 return;
1990 }
1991
1992 ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
1993 if (!ext_entry)
1994 return;
1995
developer4c32b7a2021-11-13 16:46:43 +08001996 strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
developerfd40db22021-04-29 10:08:25 +08001997 dev_hold(dev);
1998 ext_entry->dev = dev;
1999 ext_if_add(ext_entry);
2000
2001add_wifi_hook_if:
2002 dev_hold(dev);
2003 hnat_priv->wifi_hook_if[i] = dev;
2004
2005 break;
2006 }
2007 }
 2008	pr_info("%s : interface %s registered (%d)\n", __func__, dev->name, i);
2009}
2010
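/* Drop a previously registered device from wifi_hook_if[] and release the
 * external interface reference taken at registration time.
 */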
2011void mtk_ppe_dev_unregister_hook(struct net_device *dev)
2012{
2013 int i;
2014
2015 for (i = 1; i < MAX_IF_NUM; i++) {
2016 if (hnat_priv->wifi_hook_if[i] == dev) {
2017 hnat_priv->wifi_hook_if[i] = NULL;
2018 dev_put(dev);
2019
2020 break;
2021 }
2022 }
2023
2024 extif_put_dev(dev);
 2025	pr_info("%s : interface %s set null (%d)\n", __func__, dev->name, i);
2026}
2027
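/* Decide whether a flow may be offloaded: skip xfrm (IPsec) traffic and
 * connections that use a conntrack helper; everything else is eligible.
 */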
2028static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
2029{
2030 struct dst_entry *dst;
2031 struct nf_conn *ct;
2032 enum ip_conntrack_info ctinfo;
2033 const struct nf_conn_help *help;
2034
 2035	/* Do not accelerate the first pass of an xfrm flow; the second pass
 2036	 * comes from local_out, which is already filtered out in the sanity check.
2037 */
2038 dst = skb_dst(skb);
2039 if (dst && dst_xfrm(dst))
2040 return 0;
2041
2042 ct = nf_ct_get(skb, &ctinfo);
2043 if (!ct)
2044 return 1;
2045
2046 /* rcu_read_lock()ed by nf_hook_slow */
2047 help = nfct_help(ct);
2048 if (help && rcu_dereference(help->helper))
2049 return 0;
2050
2051 return 1;
2052}
2053
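/* If the DSCP carried by a keepalive packet no longer matches the bound
 * entry, delete the entry and flush the PPE cache so the flow is relearned
 * with the new DSCP value.
 */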
developer6f4a0c72021-10-19 10:04:22 +08002054static void mtk_hnat_dscp_update(struct sk_buff *skb, struct foe_entry *entry)
2055{
2056 struct iphdr *iph;
2057 struct ethhdr *eth;
2058 struct ipv6hdr *ip6h;
2059 bool flag = false;
2060
2061 eth = eth_hdr(skb);
2062 switch (ntohs(eth->h_proto)) {
2063 case ETH_P_IP:
2064 iph = ip_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002065 if (IS_IPV4_GRP(entry) && entry->ipv4_hnapt.iblk2.dscp != iph->tos)
developer6f4a0c72021-10-19 10:04:22 +08002066 flag = true;
2067 break;
2068 case ETH_P_IPV6:
2069 ip6h = ipv6_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08002070 if ((IS_IPV6_3T_ROUTE(entry) || IS_IPV6_5T_ROUTE(entry)) &&
2071 (entry->ipv6_5t_route.iblk2.dscp !=
2072 (ip6h->priority << 4 | (ip6h->flow_lbl[0] >> 4))))
developer6f4a0c72021-10-19 10:04:22 +08002073 flag = true;
2074 break;
2075 default:
2076 return;
2077 }
2078
2079 if (flag) {
developer1080dd82022-03-07 19:31:04 +08002080 if (debug_level >= 2)
2081 pr_info("Delete entry idx=%d.\n", skb_hnat_entry(skb));
developer6f4a0c72021-10-19 10:04:22 +08002082 memset(entry, 0, sizeof(struct foe_entry));
2083 hnat_cache_ebl(1);
2084 }
2085}
2086
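/* Fold the per-flow packet/byte counters collected by the PPE back into
 * the corresponding nf_conntrack accounting counters.
 */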
developer30a47682021-11-02 17:06:14 +08002087static void mtk_hnat_nf_update(struct sk_buff *skb)
2088{
2089 struct nf_conn *ct;
2090 struct nf_conn_acct *acct;
2091 struct nf_conn_counter *counter;
2092 enum ip_conntrack_info ctinfo;
2093 struct hnat_accounting diff;
2094
2095 ct = nf_ct_get(skb, &ctinfo);
2096 if (ct) {
2097 if (!hnat_get_count(hnat_priv, skb_hnat_ppe(skb), skb_hnat_entry(skb), &diff))
2098 return;
2099
2100 acct = nf_conn_acct_find(ct);
2101 if (acct) {
2102 counter = acct->counter;
2103 atomic64_add(diff.packets, &counter[CTINFO2DIR(ctinfo)].packets);
2104 atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes);
2105 }
2106 }
2107}
2108
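/* Common post-routing handler shared by the IPv4/IPv6/bridge hooks:
 * on HIT_UNBIND_RATE_REACH resolve the next hop via fn() and learn the
 * entry with skb_to_hnat_info(); on keepalive refresh the conntrack
 * counters, DSCP and multicast timestamp of an already bound entry.
 */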
developerfd40db22021-04-29 10:08:25 +08002109static unsigned int mtk_hnat_nf_post_routing(
2110 struct sk_buff *skb, const struct net_device *out,
2111 unsigned int (*fn)(struct sk_buff *, const struct net_device *,
2112 struct flow_offload_hw_path *),
2113 const char *func)
2114{
2115 struct foe_entry *entry;
2116 struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
developere5763512021-05-21 01:04:58 +08002117 .virt_dev = (struct net_device*)out };
developerfd40db22021-04-29 10:08:25 +08002118 const struct net_device *arp_dev = out;
2119
2120 if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
2121 !IS_SPACE_AVAILABLE_HEAD(skb)))
2122 return 0;
2123
developerc0419aa2022-12-07 15:56:36 +08002124 if (unlikely(!skb_mac_header_was_set(skb)))
2125 return 0;
2126
developerfd40db22021-04-29 10:08:25 +08002127 if (unlikely(!skb_hnat_is_hashed(skb)))
2128 return 0;
2129
2130 if (out->netdev_ops->ndo_flow_offload_check) {
developere5763512021-05-21 01:04:58 +08002131 out->netdev_ops->ndo_flow_offload_check(&hw_path);
developerfd40db22021-04-29 10:08:25 +08002132 out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
2133 }
2134
developerd35bbcc2022-09-28 22:46:01 +08002135 if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
developerfd40db22021-04-29 10:08:25 +08002136 return 0;
2137
2138 trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
2139 skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
2140
developer577ad2f2022-11-28 10:33:36 +08002141 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2142 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2143 return -1;
2144
developer471f6562021-05-10 20:48:34 +08002145 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002146
2147 switch (skb_hnat_reason(skb)) {
2148 case HIT_UNBIND_RATE_REACH:
2149 if (entry_hnat_is_bound(entry))
2150 break;
2151
2152 if (fn && !mtk_hnat_accel_type(skb))
2153 break;
2154
2155 if (fn && fn(skb, arp_dev, &hw_path))
2156 break;
2157
2158 skb_to_hnat_info(skb, out, entry, &hw_path);
2159 break;
2160 case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
developer30a47682021-11-02 17:06:14 +08002161 /* update hnat count to nf_conntrack by keepalive */
2162 if (hnat_priv->data->per_flow_accounting && hnat_priv->nf_stat_en)
2163 mtk_hnat_nf_update(skb);
2164
developerfd40db22021-04-29 10:08:25 +08002165 if (fn && !mtk_hnat_accel_type(skb))
2166 break;
2167
developer6f4a0c72021-10-19 10:04:22 +08002168 /* update dscp for qos */
2169 mtk_hnat_dscp_update(skb, entry);
2170
developerfd40db22021-04-29 10:08:25 +08002171		/* update mcast timestamp */
developer4164cfe2022-12-01 11:27:41 +08002172 if (hnat_priv->data->version == MTK_HNAT_V1_3 &&
developerfd40db22021-04-29 10:08:25 +08002173 hnat_priv->data->mcast && entry->bfib1.sta == 1)
2174 entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
2175
2176 if (entry_hnat_is_bound(entry)) {
2177 memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
2178
2179 return -1;
2180 }
2181 break;
2182 case HIT_BIND_MULTICAST_TO_CPU:
2183 case HIT_BIND_MULTICAST_TO_GMAC_CPU:
 2184		/* do not forward to gdma again if the PPE has already done it */
developerd35bbcc2022-09-28 22:46:01 +08002185 if (IS_LAN_GRP(out) || IS_WAN(out))
developerfd40db22021-04-29 10:08:25 +08002186 return -1;
2187 break;
2188 }
2189
2190 return 0;
2191}
2192
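/* IPv6 local-out hook: for 4in6 tunnel packets of an unbound entry, set
 * the packet type to IPV4_DSLITE or, with MAP-E enabled, to IPV4_MAP_E and
 * record the inner IPv4 tuple before the entry gets bound.
 */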
2193static unsigned int
2194mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
2195 const struct nf_hook_state *state)
2196{
2197 struct foe_entry *entry;
2198 struct ipv6hdr *ip6h;
2199 struct iphdr _iphdr;
2200 const struct iphdr *iph;
2201 struct tcpudphdr _ports;
2202 const struct tcpudphdr *pptr;
2203 int udp = 0;
2204
2205 if (unlikely(!skb_hnat_is_hashed(skb)))
2206 return NF_ACCEPT;
2207
developer577ad2f2022-11-28 10:33:36 +08002208 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2209 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2210 return NF_ACCEPT;
2211
developer471f6562021-05-10 20:48:34 +08002212 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002213 if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
2214 ip6h = ipv6_hdr(skb);
2215 if (ip6h->nexthdr == NEXTHDR_IPIP) {
2216 /* Map-E LAN->WAN: need to record orig info before fn. */
2217 if (mape_toggle) {
2218 iph = skb_header_pointer(skb, IPV6_HDR_LEN,
2219 sizeof(_iphdr), &_iphdr);
developer4c32b7a2021-11-13 16:46:43 +08002220 if (unlikely(!iph))
2221 return NF_ACCEPT;
2222
developerfd40db22021-04-29 10:08:25 +08002223 switch (iph->protocol) {
2224 case IPPROTO_UDP:
2225 udp = 1;
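				/* fall through */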
2226 case IPPROTO_TCP:
2227 break;
2228
2229 default:
2230 return NF_ACCEPT;
2231 }
2232
2233 pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
2234 sizeof(_ports), &_ports);
developer4c32b7a2021-11-13 16:46:43 +08002235 if (unlikely(!pptr))
2236 return NF_ACCEPT;
2237
developerfd40db22021-04-29 10:08:25 +08002238 entry->bfib1.udp = udp;
2239
developer25fc8c02022-05-06 16:24:02 +08002240 /* Map-E LAN->WAN record inner IPv4 header info. */
developerd35bbcc2022-09-28 22:46:01 +08002241#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
developerfd40db22021-04-29 10:08:25 +08002242 entry->bfib1.pkt_type = IPV4_MAP_E;
2243 entry->ipv4_dslite.iblk2.dscp = iph->tos;
developerd35bbcc2022-09-28 22:46:01 +08002244 entry->ipv4_mape.new_sip = ntohl(iph->saddr);
2245 entry->ipv4_mape.new_dip = ntohl(iph->daddr);
2246 entry->ipv4_mape.new_sport = ntohs(pptr->src);
2247 entry->ipv4_mape.new_dport = ntohs(pptr->dst);
developerfd40db22021-04-29 10:08:25 +08002248#else
2249 entry->ipv4_hnapt.iblk2.dscp = iph->tos;
2250 entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
2251 entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
2252 entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
2253 entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
2254#endif
2255 } else {
2256 entry->bfib1.pkt_type = IPV4_DSLITE;
2257 }
2258 }
2259 }
2260 return NF_ACCEPT;
2261}
2262
2263static unsigned int
2264mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
2265 const struct nf_hook_state *state)
2266{
developer577ad2f2022-11-28 10:33:36 +08002267 if (!skb)
2268 goto drop;
2269
developerfd40db22021-04-29 10:08:25 +08002270 post_routing_print(skb, state->in, state->out, __func__);
2271
2272 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
2273 __func__))
2274 return NF_ACCEPT;
2275
developer577ad2f2022-11-28 10:33:36 +08002276drop:
2277 if (skb)
2278 trace_printk(
2279 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2280 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2281 __func__, skb_hnat_iface(skb), state->out->name,
2282 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2283 skb_hnat_sport(skb), skb_hnat_reason(skb),
2284 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002285
2286 return NF_DROP;
2287}
2288
2289static unsigned int
2290mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
2291 const struct nf_hook_state *state)
2292{
developer577ad2f2022-11-28 10:33:36 +08002293 if (!skb)
2294 goto drop;
2295
developerfd40db22021-04-29 10:08:25 +08002296 post_routing_print(skb, state->in, state->out, __func__);
2297
2298 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
2299 __func__))
2300 return NF_ACCEPT;
2301
developer577ad2f2022-11-28 10:33:36 +08002302drop:
2303 if (skb)
2304 trace_printk(
2305 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2306 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2307 __func__, skb_hnat_iface(skb), state->out->name,
2308 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2309 skb_hnat_sport(skb), skb_hnat_reason(skb),
2310 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002311
2312 return NF_DROP;
2313}
2314
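/* Bridge pre-routing handler: recover the FOE entry index from frames
 * tagged with HQOS_MAGIC_TAG, then run the ext->GE learning path and the
 * GE->ext fast path, stealing the skb whenever it is handled here.
 */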
2315static unsigned int
2316mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
2317 const struct nf_hook_state *state)
2318{
developer659fdeb2022-12-01 23:03:07 +08002319 struct vlan_ethhdr *veth;
2320
2321 if (!skb)
2322 goto drop;
2323
2324 veth = (struct vlan_ethhdr *)skb_mac_header(skb);
developerfd40db22021-04-29 10:08:25 +08002325
developer34028fb2022-01-11 13:51:29 +08002326	if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == htons(HQOS_MAGIC_TAG)) {
developerfd40db22021-04-29 10:08:25 +08002327 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2328 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2329 }
developerfd40db22021-04-29 10:08:25 +08002330
2331 if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
2332 clr_from_extge(skb);
2333
 2334	/* packets from external devices -> xxx, step 2, learning stage */
developeraf07fad2021-11-19 17:53:42 +08002335 if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
 2336	    (qos_toggle && eth_hdr(skb)->h_proto != htons(HQOS_MAGIC_TAG)))) {
developerfd40db22021-04-29 10:08:25 +08002337 if (!do_hnat_ext_to_ge2(skb, __func__))
2338 return NF_STOLEN;
2339 goto drop;
2340 }
2341
 2342	/* packets from GE -> external device */
2343 if (do_ge2ext_fast(state->in, skb)) {
2344 if (!do_hnat_ge_to_ext(skb, __func__))
2345 return NF_STOLEN;
2346 goto drop;
2347 }
2348
2349 return NF_ACCEPT;
developer577ad2f2022-11-28 10:33:36 +08002350
developerfd40db22021-04-29 10:08:25 +08002351drop:
developer577ad2f2022-11-28 10:33:36 +08002352 if (skb)
2353 printk_ratelimited(KERN_WARNING
2354 "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x,\n"
2355 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2356 __func__, state->in->name, skb_hnat_iface(skb),
2357 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2358 skb_hnat_sport(skb), skb_hnat_reason(skb),
2359 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002360
2361 return NF_DROP;
2362}
2363
2364static unsigned int
2365mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
2366 const struct nf_hook_state *state)
2367{
developer577ad2f2022-11-28 10:33:36 +08002368 if (!skb)
2369 goto drop;
2370
developerfd40db22021-04-29 10:08:25 +08002371 post_routing_print(skb, state->in, state->out, __func__);
2372
2373 if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__))
2374 return NF_ACCEPT;
2375
developer577ad2f2022-11-28 10:33:36 +08002376drop:
2377 if (skb)
2378 trace_printk(
2379 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x,\n"
2380 "sport=0x%x, reason=0x%x, alg=0x%x)\n",
2381 __func__, skb_hnat_iface(skb), state->out->name,
2382 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2383 skb_hnat_sport(skb), skb_hnat_reason(skb),
2384 skb_hnat_alg(skb));
developerfd40db22021-04-29 10:08:25 +08002385
2386 return NF_DROP;
2387}
2388
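/* IPv4 local-out hook: make sure there is enough headroom for the FOE info
 * block and keep locally generated flows from being bound; 6rd
 * (IPv6-in-IPv4) packets are additionally marked as IPV6_6RD.
 */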
2389static unsigned int
2390mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
2391 const struct nf_hook_state *state)
2392{
2393 struct sk_buff *new_skb;
2394 struct foe_entry *entry;
2395 struct iphdr *iph;
2396
2397 if (!skb_hnat_is_hashed(skb))
2398 return NF_ACCEPT;
2399
developer577ad2f2022-11-28 10:33:36 +08002400 if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
2401 skb_hnat_ppe(skb) >= CFG_PPE_NUM)
2402 return NF_ACCEPT;
2403
developer471f6562021-05-10 20:48:34 +08002404 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08002405
2406 if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
2407 new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
2408 if (!new_skb) {
2409 dev_info(hnat_priv->dev, "%s:drop\n", __func__);
2410 return NF_DROP;
2411 }
2412 dev_kfree_skb(skb);
2413 skb = new_skb;
2414 }
2415
 2416	/* Make sure locally originated flows are not bound. */
2417 iph = ip_hdr(skb);
2418 if (iph->protocol == IPPROTO_IPV6) {
2419 entry->udib1.pkt_type = IPV6_6RD;
2420 hnat_set_head_frags(state, skb, 0, hnat_set_alg);
2421 } else {
2422 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2423 }
2424
2425 return NF_ACCEPT;
2426}
2427
2428static unsigned int mtk_hnat_br_nf_forward(void *priv,
2429 struct sk_buff *skb,
2430 const struct nf_hook_state *state)
2431{
developer4164cfe2022-12-01 11:27:41 +08002432 if ((hnat_priv->data->version == MTK_HNAT_V1_2) &&
developer99506e52021-06-30 22:03:02 +08002433 unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
developerfd40db22021-04-29 10:08:25 +08002434 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2435
2436 return NF_ACCEPT;
2437}
2438
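/* Netfilter hook table: IPv4/IPv6 pre-routing, post-routing and local-out
 * hooks plus the bridge hooks used for the external-interface path and the
 * HQoS pong handling.
 */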
2439static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
2440 {
2441 .hook = mtk_hnat_ipv4_nf_pre_routing,
2442 .pf = NFPROTO_IPV4,
2443 .hooknum = NF_INET_PRE_ROUTING,
2444 .priority = NF_IP_PRI_FIRST + 1,
2445 },
2446 {
2447 .hook = mtk_hnat_ipv6_nf_pre_routing,
2448 .pf = NFPROTO_IPV6,
2449 .hooknum = NF_INET_PRE_ROUTING,
2450 .priority = NF_IP_PRI_FIRST + 1,
2451 },
2452 {
2453 .hook = mtk_hnat_ipv6_nf_post_routing,
2454 .pf = NFPROTO_IPV6,
2455 .hooknum = NF_INET_POST_ROUTING,
2456 .priority = NF_IP_PRI_LAST,
2457 },
2458 {
2459 .hook = mtk_hnat_ipv6_nf_local_out,
2460 .pf = NFPROTO_IPV6,
2461 .hooknum = NF_INET_LOCAL_OUT,
2462 .priority = NF_IP_PRI_LAST,
2463 },
2464 {
2465 .hook = mtk_hnat_ipv4_nf_post_routing,
2466 .pf = NFPROTO_IPV4,
2467 .hooknum = NF_INET_POST_ROUTING,
2468 .priority = NF_IP_PRI_LAST,
2469 },
2470 {
2471 .hook = mtk_hnat_ipv4_nf_local_out,
2472 .pf = NFPROTO_IPV4,
2473 .hooknum = NF_INET_LOCAL_OUT,
2474 .priority = NF_IP_PRI_LAST,
2475 },
2476 {
2477 .hook = mtk_hnat_br_nf_local_in,
2478 .pf = NFPROTO_BRIDGE,
2479 .hooknum = NF_BR_LOCAL_IN,
2480 .priority = NF_BR_PRI_FIRST,
2481 },
2482 {
2483 .hook = mtk_hnat_br_nf_local_out,
2484 .pf = NFPROTO_BRIDGE,
2485 .hooknum = NF_BR_LOCAL_OUT,
2486 .priority = NF_BR_PRI_LAST - 1,
2487 },
2488 {
2489 .hook = mtk_pong_hqos_handler,
2490 .pf = NFPROTO_BRIDGE,
2491 .hooknum = NF_BR_PRE_ROUTING,
developer2b85f652021-08-19 16:09:50 +08002492 .priority = NF_BR_PRI_FIRST + 1,
developerfd40db22021-04-29 10:08:25 +08002493 },
2494};
2495
2496int hnat_register_nf_hooks(void)
2497{
2498 return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2499}
2500
2501void hnat_unregister_nf_hooks(void)
2502{
2503 nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2504}
2505
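/* When the Wi-Fi HNAT (whnat) path is enabled, rehook the bridge handlers:
 * local-in moves to bridge pre-routing, local-out to bridge post-routing,
 * and the HQoS pong handler is replaced by the plain bridge-forward handler.
 */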
2506int whnat_adjust_nf_hooks(void)
2507{
2508 struct nf_hook_ops *hook = mtk_hnat_nf_ops;
2509 unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);
2510
developerfd40db22021-04-29 10:08:25 +08002511 while (n-- > 0) {
2512 if (hook[n].hook == mtk_hnat_br_nf_local_in) {
2513 hook[n].hooknum = NF_BR_PRE_ROUTING;
developer2b85f652021-08-19 16:09:50 +08002514 hook[n].priority = NF_BR_PRI_FIRST + 1;
developerfd40db22021-04-29 10:08:25 +08002515 } else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
2516 hook[n].hooknum = NF_BR_POST_ROUTING;
2517 } else if (hook[n].hook == mtk_pong_hqos_handler) {
2518 hook[n].hook = mtk_hnat_br_nf_forward;
2519 hook[n].hooknum = NF_BR_FORWARD;
2520 hook[n].priority = NF_BR_PRI_LAST - 1;
2521 }
2522 }
2523
2524 return 0;
2525}
2526
developerfd40db22021-04-29 10:08:25 +08002527int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
2528 struct packet_type *pt, struct net_device *unused)
2529{
2530 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2531
2532 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2533 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2534
developer659fdeb2022-12-01 23:03:07 +08002535 if (do_hnat_ge_to_ext(skb, __func__) == -1)
2536 return 1;
developerfd40db22021-04-29 10:08:25 +08002537
2538 return 0;
2539}
developerfd40db22021-04-29 10:08:25 +08002540