blob: 8d199efeb8bc82efbb2d6e9256f714fcaae18bad [file] [log] [blame]
developerfd40db22021-04-29 10:08:25 +08001/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
11 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
12 */
13
14#include <linux/netfilter_bridge.h>
15#include <linux/netfilter_ipv6.h>
16
17#include <net/arp.h>
18#include <net/neighbour.h>
19#include <net/netfilter/nf_conntrack_helper.h>
20#include <net/netfilter/nf_flow_table.h>
21#include <net/ipv6.h>
22#include <net/ip6_route.h>
23#include <net/ip.h>
24#include <net/tcp.h>
25#include <net/udp.h>
developer30a47682021-11-02 17:06:14 +080026#include <net/netfilter/nf_conntrack.h>
27#include <net/netfilter/nf_conntrack_acct.h>
developerfd40db22021-04-29 10:08:25 +080028
29#include "nf_hnat_mtk.h"
30#include "hnat.h"
31
32#include "../mtk_eth_soc.h"
33
/* True when a hashed packet arriving on a GMAC-side interface (LAN/WAN/PPD)
 * was flagged by the PPE as HIT_BIND_FORCE_TO_CPU, i.e. it belongs to a
 * bound entry whose egress is an external device and must be fast-relayed.
 */
#define do_ge2ext_fast(dev, skb) \
	((IS_LAN(dev) || IS_WAN(dev) || IS_PPD(dev)) && \
	 skb_hnat_is_hashed(skb) && \
	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
/* True for a packet looping back on the PPD device from the PDMA/QDMA
 * source port whose VLAN tag encodes the ifindex of a registered external
 * or WAN device (the tag is stamped as "where we come from" on the way in).
 */
#define do_ext2ge_fast_learn(dev, skb) \
	(IS_PPD(dev) && \
	 (skb_hnat_sport(skb) == NR_PDMA_PORT || \
	  skb_hnat_sport(skb) == NR_QDMA_PORT) && \
	 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) || \
	  get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
/* MAP-E downstream (WAN -> LAN/WLAN) candidate that has not already been
 * through the MAP-E pingpong path.
 */
#define do_mape_w2l_fast(dev, skb) \
	(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))
46
/* Cached outer IPv6 headers used to rebuild the tunnel header for MAP-E
 * pingpong: l2w = LAN->WAN (upstream), w2l = WAN->LAN (downstream).
 * Written by do_hnat_mape_w2l_fast() / read by mape_add_ipv6_hdr() callers.
 */
static struct ipv6hdr mape_l2w_v6h;
static struct ipv6hdr mape_w2l_v6h;
49static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
50{
51 int i;
52
53 for (i = 1; i < MAX_IF_NUM; i++) {
54 if (hnat_priv->wifi_hook_if[i] == dev)
55 return i;
56 }
57
58 return 0;
59}
60
61static inline int get_ext_device_number(void)
62{
63 int i, number = 0;
64
65 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++)
66 number += 1;
67 return number;
68}
69
70static inline int find_extif_from_devname(const char *name)
71{
72 int i;
73 struct extdev_entry *ext_entry;
74
75 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
76 ext_entry = hnat_priv->ext_if[i];
77 if (!strcmp(name, ext_entry->name))
78 return 1;
79 }
80 return 0;
81}
82
83static inline int get_index_from_dev(const struct net_device *dev)
84{
85 int i;
86 struct extdev_entry *ext_entry;
87
88 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
89 ext_entry = hnat_priv->ext_if[i];
90 if (dev == ext_entry->dev)
91 return ext_entry->dev->ifindex;
92 }
93 return 0;
94}
95
96static inline struct net_device *get_dev_from_index(int index)
97{
98 int i;
99 struct extdev_entry *ext_entry;
100 struct net_device *dev = 0;
101
102 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
103 ext_entry = hnat_priv->ext_if[i];
104 if (ext_entry->dev && index == ext_entry->dev->ifindex) {
105 dev = ext_entry->dev;
106 break;
107 }
108 }
109 return dev;
110}
111
/* Map @index to the WAN net_device.
 *
 * Lazily resolves and caches hnat_priv->g_wandev by name on first use; the
 * dev_get_by_name() reference is held for the lifetime of the cache and is
 * dropped in nf_hnat_netdevice_event(NETDEV_UNREGISTER).
 * Returns the cached WAN device when its ifindex matches, else NULL.
 */
static inline struct net_device *get_wandev_from_index(int index)
{
	if (!hnat_priv->g_wandev)
		hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

	if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index)
		return hnat_priv->g_wandev;
	return NULL;
}
121
122static inline int extif_set_dev(struct net_device *dev)
123{
124 int i;
125 struct extdev_entry *ext_entry;
126
127 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
128 ext_entry = hnat_priv->ext_if[i];
129 if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
130 dev_hold(dev);
131 ext_entry->dev = dev;
132 pr_info("%s(%s)\n", __func__, dev->name);
133
134 return ext_entry->dev->ifindex;
135 }
136 }
137
138 return -1;
139}
140
141static inline int extif_put_dev(struct net_device *dev)
142{
143 int i;
144 struct extdev_entry *ext_entry;
145
146 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
147 ext_entry = hnat_priv->ext_if[i];
148 if (ext_entry->dev == dev) {
149 ext_entry->dev = NULL;
150 dev_put(dev);
151 pr_info("%s(%s)\n", __func__, dev->name);
152
developerbc53e5f2021-05-21 10:07:17 +0800153 return 0;
developerfd40db22021-04-29 10:08:25 +0800154 }
155 }
156
157 return -1;
158}
159
/* Append @ext_entry to the first free ext_if[] slot.
 *
 * Returns the number of registered entries after the call.  NOTE: when the
 * table is already full the entry is silently NOT added and the unchanged
 * count (MAX_EXT_DEVS) is returned - callers cannot distinguish that case
 * from a successful insertion into the last slot.
 */
int ext_if_add(struct extdev_entry *ext_entry)
{
	int len = get_ext_device_number();

	if (len < MAX_EXT_DEVS)
		hnat_priv->ext_if[len++] = ext_entry;

	return len;
}
169
170int ext_if_del(struct extdev_entry *ext_entry)
171{
172 int i, j;
173
174 for (i = 0; i < MAX_EXT_DEVS; i++) {
175 if (hnat_priv->ext_if[i] == ext_entry) {
176 for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++)
177 hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
178 hnat_priv->ext_if[j] = NULL;
179 break;
180 }
181 }
182
183 return i;
184}
185
/* Invalidate every bound FOE entry in all PPE tables when @dev changes
 * state, forcing those flows back through the CPU slow path so they get
 * re-learned.  Only acts on LAN/WAN devices, registered external
 * interfaces, or devices implementing ndo_flow_offload_check.
 */
void foe_clear_all_bind_entries(struct net_device *dev)
{
	int i, hash_index;
	struct foe_entry *entry;

	if (!IS_LAN(dev) && !IS_WAN(dev) &&
	    !find_extif_from_devname(dev->name) &&
	    !dev->netdev_ops->ndo_flow_offload_check)
		return;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		/* stop the HW from binding new entries while we scan:
		 * semi-auto mode, forward to CPU only
		 */
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_ONLY_FWD_CPU);

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND) {
				entry->ipv4_hnapt.udib1.state = INVALID;
				/* refresh timestamp from FE reg 0x10 (low 8 bits)
				 * so the invalidated entry ages out normally
				 */
				entry->ipv4_hnapt.udib1.time_stamp =
					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
			}
		}
	}

	/* clear HWNAT cache */
	hnat_cache_ebl(1);

	/* timer handler presumably restores the normal SMA bind mode later
	 * - confirm against hnat_sma_build_entry_timer's callback
	 */
	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
}
215
216static void gmac_ppe_fwd_enable(struct net_device *dev)
217{
218 if (IS_LAN(dev) || IS_GMAC1_MODE)
219 set_gmac_ppe_fwd(0, 1);
220 else if (IS_WAN(dev))
221 set_gmac_ppe_fwd(1, 1);
222}
223
/* netdevice notifier: keeps PPE forwarding state, external-interface
 * bindings and the cached PPD/WAN device references in sync with netdev
 * lifecycle events.  Always returns NOTIFY_DONE.
 */
int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
			    void *ptr)
{
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		gmac_ppe_fwd_enable(dev);

		extif_set_dev(dev);

		break;
	case NETDEV_GOING_DOWN:
		/* Wi-Fi hook interfaces are managed elsewhere; skip unbind */
		if (!get_wifi_hook_if_index_from_dev(dev))
			extif_put_dev(dev);

		foe_clear_all_bind_entries(dev);

		break;
	case NETDEV_UNREGISTER:
		/* drop the cached references taken via dev_get_by_name() */
		if (hnat_priv->g_ppdev == dev) {
			hnat_priv->g_ppdev = NULL;
			dev_put(dev);
		}
		if (hnat_priv->g_wandev == dev) {
			hnat_priv->g_wandev = NULL;
			dev_put(dev);
		}

		break;
	case NETDEV_REGISTER:
		/* (re)acquire the PPD/WAN device caches as soon as they appear */
		if (IS_PPD(dev) && !hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
		if (IS_WAN(dev) && !hnat_priv->g_wandev)
			hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
269
270void foe_clear_entry(struct neighbour *neigh)
271{
272 u32 *daddr = (u32 *)neigh->primary_key;
273 unsigned char h_dest[ETH_ALEN];
274 struct foe_entry *entry;
developer471f6562021-05-10 20:48:34 +0800275 int i, hash_index;
developerfd40db22021-04-29 10:08:25 +0800276 u32 dip;
277
278 dip = (u32)(*daddr);
279
developer471f6562021-05-10 20:48:34 +0800280 for (i = 0; i < CFG_PPE_NUM; i++) {
281 for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
282 entry = hnat_priv->foe_table_cpu[i] + hash_index;
283 if (entry->bfib1.state == BIND &&
284 entry->ipv4_hnapt.new_dip == ntohl(dip)) {
285 *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
286 *((u16 *)&h_dest[4]) =
287 swab16(entry->ipv4_hnapt.dmac_lo);
288 if (strncmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
289 pr_info("%s: state=%d\n", __func__,
290 neigh->nud_state);
291 cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
292 SMA, SMA_ONLY_FWD_CPU);
developerfd40db22021-04-29 10:08:25 +0800293
developer471f6562021-05-10 20:48:34 +0800294 entry->ipv4_hnapt.udib1.state = INVALID;
295 entry->ipv4_hnapt.udib1.time_stamp =
296 readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
developerfd40db22021-04-29 10:08:25 +0800297
developer471f6562021-05-10 20:48:34 +0800298 /* clear HWNAT cache */
299 hnat_cache_ebl(1);
developerfd40db22021-04-29 10:08:25 +0800300
developer471f6562021-05-10 20:48:34 +0800301 mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
302 jiffies + 3 * HZ);
developerfd40db22021-04-29 10:08:25 +0800303
developer471f6562021-05-10 20:48:34 +0800304 pr_info("Delete old entry: dip =%pI4\n", &dip);
305 pr_info("Old mac= %pM\n", h_dest);
306 pr_info("New mac= %pM\n", neigh->ha);
307 }
developerfd40db22021-04-29 10:08:25 +0800308 }
309 }
310 }
311}
312
313int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
314 void *ptr)
315{
316 struct net_device *dev = NULL;
317 struct neighbour *neigh = NULL;
318
319 switch (event) {
320 case NETEVENT_NEIGH_UPDATE:
321 neigh = ptr;
322 dev = neigh->dev;
323 if (dev)
324 foe_clear_entry(neigh);
325 break;
326 }
327
328 return NOTIFY_DONE;
329}
330
/* Prepend the cached outer IPv6 header @mape_ip6h to an IPv4 packet for
 * MAP-E encapsulation, rewriting the Ethernet header in front of it.
 * Returns 0 on success, (unsigned)-1 when the skb lacks headroom or is
 * shared/unwritable.
 */
unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
{
	struct ethhdr *eth = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr *iph = NULL;

	/* need IPV6_HDR_LEN of headroom and exclusive write access */
	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		return -1;
	}

	/* point to L3 */
	/* move the Ethernet header up by IPV6_HDR_LEN, then insert the outer
	 * IPv6 header between it and the original IPv4 packet; net effect of
	 * the two pushes is data -= IPV6_HDR_LEN with data now at the IPv6
	 * header and the MAC header sitting ETH_HLEN before it
	 */
	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);

	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	eth->h_proto = htons(ETH_P_IPV6);
	skb->protocol = htons(ETH_P_IPV6);

	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
	ip6h = (struct ipv6hdr *)(skb->data);
	/* NOTE(review): payload_len is set to the raw inner tot_len (already
	 * big-endian); IPv6 payload_len normally excludes its own header, so
	 * this relies on downstream/hardware fix-up - confirm
	 */
	ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */

	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
	return 0;
}
358
359static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
360 struct ethhdr *eth)
361{
362 skb->pkt_type = PACKET_HOST;
363 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
364 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
365 skb->pkt_type = PACKET_BROADCAST;
366 else
367 skb->pkt_type = PACKET_MULTICAST;
368 }
369}
370
/* Relay a packet received on an external interface @in into the PPD
 * device so the PPE can learn/accelerate the flow.  The source ifindex is
 * encoded into a synthetic 802.1Q tag ("where we come from") which
 * do_hnat_ext_to_ge2() decodes on the way back.
 * Returns 0 when the skb was queued (caller must treat it as stolen),
 * (unsigned)-1 otherwise.
 */
unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
			       const char *func)
{
	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
		u16 vlan_id = 0;
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		set_to_ppe(skb);

		/* a real VLAN tag must be made explicit in the payload before
		 * the metadata tag is repurposed below
		 */
		vlan_id = skb_vlan_tag_get_id(skb);
		if (vlan_id) {
			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
			if (!skb)
				return -1;
		}

		/*set where we come from*/
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
		trace_printk(
			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
			in->name, hnat_priv->g_ppdev->name);
		skb->dev = hnat_priv->g_ppdev;
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__, func);
		return 0;
	}

	trace_printk("%s: called from %s fail\n", __func__, func);
	return -1;
}
404
/* Second leg of the external->GE learning path: the packet looped back
 * through the PPD carries the originating ifindex in its VLAN tag.  Decode
 * it, restore the skb and re-inject it on the original external device, or
 * fall back to the MAP-E WAN->LAN pingpong.  Returns 0 when the skb was
 * consumed, (unsigned)-1 on failure.
 */
unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct net_device *dev;
	struct foe_entry *entry;

	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
		     ntohs(skb->vlan_proto), skb->vlan_tci);

	/* the tag was stamped by do_hnat_ext_to_ge(): VID == source ifindex */
	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);

	if (dev) {
		/*set where we to go*/
		skb->dev = dev;
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;

		/* strip an in-payload 802.1Q tag re-inserted on the first leg */
		if (ntohs(eth->h_proto) == ETH_P_8021Q) {
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				return -1;
		}

		/* spread bonded traffic by FOE hash unless the entry index is
		 * the "invalid" sentinel (0x7fff on V4 hw, 0x3fff otherwise)
		 */
		if (IS_BOND_MODE &&
		    (((hnat_priv->data->version == MTK_HNAT_V4) &&
		      (skb_hnat_entry(skb) != 0x7fff)) ||
		     ((hnat_priv->data->version != MTK_HNAT_V4) &&
		      (skb_hnat_entry(skb) != 0x3fff))))
			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);

		set_from_extge(skb);
		fix_skb_packet_type(skb, skb->dev, eth);
		netif_rx(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		/* MapE WAN --> LAN/WLAN PingPong. */
		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
		if (mape_toggle && dev) {
			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = dev;
				set_from_mape(skb);
				skb->vlan_proto = 0;
				skb->vlan_tci = 0;
				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
				entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
				/* force IPv4 HNAPT so HW treats the re-tunneled
				 * flow as plain NAT
				 */
				entry->bfib1.pkt_type = IPV4_HNAPT;
				netif_rx(skb);
				return 0;
			}
		}
		trace_printk("%s: called from %s fail\n", __func__, func);
		return -1;
	}
}
462
/* Fast-relay a CPU-bound packet of a bound flow to its external egress
 * device recorded in the FOE entry's act_dp field.  Falls back to MAP-E
 * LAN->WAN encapsulation when no external device matches; invalidates the
 * entry when the egress device is gone.  Returns 0 when the skb was
 * queued, (unsigned)-1 (or NF_ACCEPT after an skb_unshare failure) otherwise.
 */
unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
{
	/*set where we to go*/
	u8 index;
	struct foe_entry *entry;
	struct net_device *dev;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	/* act_dp holds the egress external ifindex recorded at bind time */
	if (IS_IPV4_GRP(entry))
		index = entry->ipv4_hnapt.act_dp;
	else
		index = entry->ipv6_5t_route.act_dp;

	skb->dev = get_dev_from_index(index);

	/* HQoS mode prepends a magic VLAN tag; strip it before transmit */
	if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NF_ACCEPT;

		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
			return NF_ACCEPT;

		skb_pull_rcsum(skb, VLAN_HLEN);

		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
			2 * ETH_ALEN);
	}

	if (skb->dev) {
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		if (mape_toggle) {
			/* Add ipv6 header mape for lan/wlan -->wan */
			dev = get_wandev_from_index(index);
			if (dev) {
				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
					skb_set_network_header(skb, 0);
					skb_push(skb, ETH_HLEN);
					skb_set_mac_header(skb, 0);
					skb->dev = dev;
					dev_queue_xmit(skb);
					return 0;
				}
				trace_printk("%s: called from %s fail[MapE]\n", __func__,
					     func);
				return -1;
			}
		}
	}
	/*if external devices is down, invalidate related ppe entry*/
	if (entry_hnat_is_bound(entry)) {
		entry->bfib1.state = INVALID;
		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.act_dp = 0;
		else
			entry->ipv6_5t_route.act_dp = 0;

		/* clear HWNAT cache */
		hnat_cache_ebl(1);
	}
	trace_printk("%s: called from %s fail, index=%x\n", __func__,
		     func, index);
	return -1;
}
534
/* Trace helper: dump HNAT metadata for a packet entering a pre-routing
 * hook (ftrace only; no functional effect).
 */
static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
			      const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}
545
/* Trace helper: dump HNAT metadata for a packet leaving a post-routing
 * hook (ftrace only; identical format to pre_routing_print).
 */
static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
			       const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}
556
/* Stamp the HNAT ingress-interface magic into the skb's FOE metadata.
 * The if/else chain is order-sensitive: warp-HNAT WED traffic keeps its
 * existing tag, then LAN/PPD/EXT/WAN, then virtual (offload-capable)
 * devices; anything else unknown gets its FOE info wiped.  @val is unused
 * here - the parameter exists to match the hnat_set_head_frags() callback
 * signature.
 */
static inline void hnat_set_iif(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	if (IS_WHNAT(state->in) && FROM_WED(skb)) {
		/* tagged by WED already; leave it alone */
		return;
	} else if (IS_LAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
	} else if (IS_PPD(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
	} else if (IS_EXT(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
	} else if (IS_WAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
	} else if (!IS_BR(state->in)) {
		if (state->in->netdev_ops->ndo_flow_offload_check) {
			skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
		} else {
			skb_hnat_iface(skb) = FOE_INVALID;

			/* scrub stale FOE metadata from unknown ingress */
			if (is_magic_tag_valid(skb) &&
			    IS_SPACE_AVAILABLE_HEAD(skb))
				memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
		}
	}
}
582
/* Set the skb's HNAT ALG flag to @val; used as a hnat_set_head_frags()
 * callback (@state is unused but required by the callback signature).
 */
static inline void hnat_set_alg(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	skb_hnat_alg(skb) = val;
}
588
589static inline void hnat_set_head_frags(const struct nf_hook_state *state,
590 struct sk_buff *head_skb, int val,
591 void (*fn)(const struct nf_hook_state *state,
592 struct sk_buff *skb, int val))
593{
594 struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;
595
596 fn(state, head_skb, val);
597 while (segs) {
598 fn(state, segs, val);
599 segs = segs->next;
600 }
601}
602
/* MAP-E WAN -> LAN/WLAN fast path: strip the outer IPv6 header from an
 * IPinIP packet, cache it for the reverse direction, and re-inject the
 * inner IPv4 packet into the PPD with the source ifindex encoded in a
 * synthetic VLAN tag.  Returns 0 when the skb was consumed, (unsigned)-1
 * otherwise.
 */
unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
				   const char *func)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct iphdr _iphdr;
	struct iphdr *iph;
	struct ethhdr *eth;

	/* WAN -> LAN/WLAN MapE. */
	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
		if (unlikely(!iph))
			return -1;

		/* only TCP/UDP inner flows are accelerated */
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			break;
		default:
			return -1;
		}
		/* remember the outer header for the LAN->WAN direction */
		mape_w2l_v6h = *ip6h;

		/* Remove ipv6 header. */
		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
		       skb->data - ETH_HLEN, ETH_HLEN);
		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
		skb_set_mac_header(skb, 0);
		skb_set_network_header(skb, ETH_HLEN);
		/* NOTE(review): assumes the inner IPv4 header has no options
		 * (sizeof(struct iphdr) == 20) - confirm
		 */
		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));

		eth = eth_hdr(skb);
		eth->h_proto = htons(ETH_P_IP);
		set_to_ppe(skb);

		/* encode "where we come from" for the PPD loop, see
		 * do_hnat_ext_to_ge()
		 */
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));

		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		skb->dev = hnat_priv->g_ppdev;
		skb->protocol = htons(ETH_P_IP);

		dev_queue_xmit(skb);

		return 0;
	}
	return -1;
}
654
/* Return 1 if the packet is a candidate for PPE acceleration: unicast
 * IPv4 TCP/UDP/IPinIP, IPv6 TCP/UDP, IPv6-tunneled IPv4 TCP/UDP, or any
 * 802.1Q frame; 0 otherwise (including frames without valid HNAT tag
 * space and broadcasts).
 */
static unsigned int is_ppe_support_type(struct sk_buff *skb)
{
	struct ethhdr *eth = NULL;
	struct iphdr *iph = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr _iphdr;

	eth = eth_hdr(skb);
	if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) ||
	    is_broadcast_ether_addr(eth->h_dest))
		return 0;

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);

		/* accelerate only TCP/UDP, plus IPv6-in-IPv4 tunnels */
		if ((iph->protocol == IPPROTO_TCP) ||
		    (iph->protocol == IPPROTO_UDP) ||
		    (iph->protocol == IPPROTO_IPV6)) {
			return 1;
		}

		break;
	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);

		if ((ip6h->nexthdr == NEXTHDR_TCP) ||
		    (ip6h->nexthdr == NEXTHDR_UDP)) {
			return 1;
		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
			/* IPv4-in-IPv6 (DS-Lite / MAP-E): peek at inner proto */
			iph = skb_header_pointer(skb, IPV6_HDR_LEN,
						 sizeof(_iphdr), &_iphdr);
			if (unlikely(!iph))
				return 0;

			if ((iph->protocol == IPPROTO_TCP) ||
			    (iph->protocol == IPPROTO_UDP)) {
				return 1;
			}

		}

		break;
	case ETH_P_8021Q:
		return 1;
	}

	return 0;
}
705
/* NF_INET_PRE_ROUTING hook (IPv6): tag ingress metadata, then dispatch
 * the HNAT fast paths (ext->GE learning, GE->ext relay, MAP-E pingpong).
 * Returns NF_STOLEN when a fast path consumed the skb, NF_DROP on relay
 * failure, otherwise NF_ACCEPT to continue normal processing.
 */
static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!is_ppe_support_type(skb)) {
		/* mark ALG so the PPE never binds this flow */
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	/* MapE need remove ipv6 header and pingpong. */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	if (is_from_mape(skb))
		clr_from_extge(skb);

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
759
/* NF_INET_PRE_ROUTING hook (IPv4): same flow as the IPv6 variant minus
 * the MAP-E pingpong.  Returns NF_STOLEN when a fast path consumed the
 * skb, NF_DROP on relay failure, else NF_ACCEPT.
 */
static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!is_ppe_support_type(skb)) {
		/* mark ALG so the PPE never binds this flow */
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
802
/* NF_BR_LOCAL_IN hook: HNAT fast-path dispatch for bridged traffic,
 * including HQoS magic-tag decoding and warp-HNAT ext<->GE relays.
 * Returns NF_STOLEN when a fast path consumed the skb, NF_DROP on relay
 * failure, else NF_ACCEPT.
 */
static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct vlan_ethhdr *veth;

	/* HQoS encodes the FOE entry index in a magic VLAN tag; recover it */
	if (IS_HQOS_MODE && hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx ,step 2, learning stage */
		if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
		    (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets form ge -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

	/* MapE need remove ipv6 header and pingpong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
885
/* Resolve the IPv6 next-hop neighbour for skb's route and rewrite the
 * Ethernet source/destination accordingly (PPPoE paths take the addresses
 * from @hw_path instead).  Returns 0 on success, (unsigned)-1 when the
 * neighbour is missing or its MAC is invalid.
 */
static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	/* neigh lookup and ->ha access must stay inside the RCU-BH section */
	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/*copy ether type for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}
933
/* Resolve the IPv4 next-hop neighbour for skb's route and rewrite the
 * Ethernet source/destination accordingly (PPPoE paths take the addresses
 * from @hw_path instead).  Returns 0 on success, (unsigned)-1 when the
 * neighbour is missing or its MAC is invalid.
 */
static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	u32 nexthop;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = (__force struct net_device *)out;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	/* neigh lookup and ->ha access must stay inside the RCU-BH section */
	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
			   &ip_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	return 0;
}
973
/* Derive the PPE checksum base from an IPv4 header: the sum of the
 * one's-complements of check, tot_len and id (all host order), folded
 * back into 16 bits.  The 0x7 mask on the carry mirrors the hardware's
 * folding behaviour.
 */
static u16 ppe_get_chkbase(struct iphdr *iph)
{
	u16 inv_check = ~ntohs(iph->check);
	u16 inv_len = ~ntohs(iph->tot_len);
	u16 inv_id = ~ntohs(iph->id);
	u32 sum;

	sum = (u32)inv_check + inv_len + inv_id;
	sum = ((sum >> 16) & 0x7) + (sum & 0xFFFF);
	sum = ((sum >> 16) & 0x7) + (sum & 0xFFFF);

	return sum & 0xFFFF;
}
993
/* Populate the L2 fields (byte-swapped MAC halves and PPPoE session id)
 * of @entry from @eth/@hw_path, choosing the union layout that matches
 * the entry's packet type.  Passed and returned by value.
 */
struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
				  struct flow_offload_hw_path *hw_path)
{
	switch (entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		/* MACs are stored as swab32(high 4 bytes)/swab16(low 2 bytes) */
		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv6_5t_route.smac_lo =
			swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
		break;
	}
	return entry;
}
1021
/* Fill the common info-block bits of @entry: PPPoE/VLAN flags, TTL
 * decrement, cache enable, hardware timestamp, multicast flag and
 * per-version port aggregation mask.  Passed and returned by value.
 */
struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
				   struct flow_offload_hw_path *hw_path)
{
	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
	entry.bfib1.ttl = 1;
	entry.bfib1.cah = 1;
	/* timestamp width differs per HW generation: 8 bits on V4, 15 before */
	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4) ?
		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);

	switch (entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv4_hnapt.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv4_hnapt.iblk2.mcast = 0;
		}

		/* port aggregation mask width also differs per generation */
		entry.ipv4_hnapt.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4) ? 0xf : 0x3f;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv6_5t_route.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv6_5t_route.iblk2.mcast = 0;
		}

		entry.ipv6_5t_route.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4) ? 0xf : 0x3f;
		break;
	}
	return entry;
}
1073
1074static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
1075{
1076 entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
1077 entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
1078 entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
1079}
1080
/* skb_to_hnat_info - learn a new PPE binding from an egress skb.
 *
 * Builds a complete FOE entry on the stack from the skb's headers, the
 * unbound entry the PPE already hashed (@foe) and the offload path
 * (@hw_path), then commits it over *foe.  Handles IPv4 HNAPT/HNAT,
 * DS-Lite, MAP-E, 6RD and IPv6 3T/5T routing, plus LAN/WAN/EXT egress
 * port and QoS queue selection.
 *
 * Returns 0 on success or when the packet is deliberately not bound;
 * returns -1 (wrapped, since the return type is unsigned) on malformed
 * or unsupported packets.  The caller ignores the return value.
 */
static unsigned int skb_to_hnat_info(struct sk_buff *skb,
				     const struct net_device *dev,
				     struct foe_entry *foe,
				     struct flow_offload_hw_path *hw_path)
{
	struct foe_entry entry = { 0 };
	int whnat = IS_WHNAT(dev);	/* Wi-Fi warp-HNAT device: binding is deferred to mtk_sw_nat_hook_tx() */
	struct ethhdr *eth;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	u32 gmac = NR_DISCARD;		/* egress PPE port; NR_DISCARD until resolved */
	int udp = 0;
	u32 qid = 0;
	u32 port_id = 0;
	int mape = 0;			/* set for the MAP-E LAN->WAN special case */

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
		/* point to ethernet header for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	else
		eth = eth_hdr(skb);

	/*do not bind multicast if PPE mcast not enable*/
	if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest))
		return 0;

	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
	entry.bfib1.state = foe->udib1.state;

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
	entry.bfib1.sp = foe->udib1.sp;
#endif

	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);
		switch (iph->protocol) {
		case IPPROTO_UDP:
			udp = 1;
			/* fallthrough */
		case IPPROTO_TCP:
			entry.ipv4_hnapt.etype = htons(ETH_P_IP);

			/* DS-Lite WAN->LAN */
			if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
			    entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
				/* inner 4-tuple comes from the hashed entry */
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
				if (entry.bfib1.pkt_type == IPV4_MAP_E) {
					/* MAP-E also rewrites the inner
					 * IPv4 addresses/ports (NAPT),
					 * taken from the packet itself.
					 */
					pptr = skb_header_pointer(skb,
								  iph->ihl * 4,
								  sizeof(_ports),
								  &_ports);
					if (unlikely(!pptr))
						return -1;

					entry.ipv4_dslite.new_sip =
						ntohl(iph->saddr);
					entry.ipv4_dslite.new_dip =
						ntohl(iph->daddr);
					entry.ipv4_dslite.new_sport =
						ntohs(pptr->src);
					entry.ipv4_dslite.new_dport =
						ntohs(pptr->dst);
				}
#endif

				/* outer IPv6 tunnel endpoints from the
				 * hashed entry
				 */
				entry.ipv4_dslite.tunnel_sipv6_0 =
					foe->ipv4_dslite.tunnel_sipv6_0;
				entry.ipv4_dslite.tunnel_sipv6_1 =
					foe->ipv4_dslite.tunnel_sipv6_1;
				entry.ipv4_dslite.tunnel_sipv6_2 =
					foe->ipv4_dslite.tunnel_sipv6_2;
				entry.ipv4_dslite.tunnel_sipv6_3 =
					foe->ipv4_dslite.tunnel_sipv6_3;

				entry.ipv4_dslite.tunnel_dipv6_0 =
					foe->ipv4_dslite.tunnel_dipv6_0;
				entry.ipv4_dslite.tunnel_dipv6_1 =
					foe->ipv4_dslite.tunnel_dipv6_1;
				entry.ipv4_dslite.tunnel_dipv6_2 =
					foe->ipv4_dslite.tunnel_dipv6_2;
				entry.ipv4_dslite.tunnel_dipv6_3 =
					foe->ipv4_dslite.tunnel_dipv6_3;

				entry.ipv4_dslite.bfib1.rmt = 1;	/* remove tunnel header */
				entry.ipv4_dslite.iblk2.dscp = iph->tos;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;

			} else {
				/* plain IPv4 NAT/NAPT */
				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				/* preserve an incoming WAN VLAN tag when
				 * forwarding to LAN
				 */
				if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
					entry.bfib1.vlan_layer += 1;

					if (entry.ipv4_hnapt.vlan1)
						entry.ipv4_hnapt.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
					else
						entry.ipv4_hnapt.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
				}

				/* pre-NAT tuple from the hashed entry,
				 * post-NAT addresses from the packet
				 */
				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
			}

			entry.ipv4_hnapt.bfib1.udp = udp;
			if (IS_IPV4_HNAPT(foe)) {
				/* post-NAT ports from the L4 header */
				pptr = skb_header_pointer(skb, iph->ihl * 4,
							  sizeof(_ports),
							  &_ports);
				if (unlikely(!pptr))
					return -1;

				entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
			}

			break;

		default:
			return -1;
		}
		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, iph, skb->len,
			skb->data_len);
		break;

	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);
		switch (ip6h->nexthdr) {
		case NEXTHDR_UDP:
			udp = 1;
			/* fallthrough */
		case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
			entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);

			entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;

			/* preserve an incoming WAN VLAN tag when
			 * forwarding to LAN
			 */
			if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
				entry.bfib1.vlan_layer += 1;

				if (entry.ipv6_5t_route.vlan1)
					entry.ipv6_5t_route.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
				else
					entry.ipv6_5t_route.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
			}

			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_5t_route.iblk2.mibf = 1;
			entry.ipv6_5t_route.bfib1.udp = udp;

			/* 6RD WAN->LAN: tunnel endpoints + header removal */
			if (IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.bfib1.rmt = 1;
				entry.ipv6_6rd.tunnel_sipv4 =
					foe->ipv6_6rd.tunnel_sipv4;
				entry.ipv6_6rd.tunnel_dipv4 =
					foe->ipv6_6rd.tunnel_dipv4;
			}

			entry.ipv6_3t_route.ipv6_sip0 =
				foe->ipv6_3t_route.ipv6_sip0;
			entry.ipv6_3t_route.ipv6_sip1 =
				foe->ipv6_3t_route.ipv6_sip1;
			entry.ipv6_3t_route.ipv6_sip2 =
				foe->ipv6_3t_route.ipv6_sip2;
			entry.ipv6_3t_route.ipv6_sip3 =
				foe->ipv6_3t_route.ipv6_sip3;

			entry.ipv6_3t_route.ipv6_dip0 =
				foe->ipv6_3t_route.ipv6_dip0;
			entry.ipv6_3t_route.ipv6_dip1 =
				foe->ipv6_3t_route.ipv6_dip1;
			entry.ipv6_3t_route.ipv6_dip2 =
				foe->ipv6_3t_route.ipv6_dip2;
			entry.ipv6_3t_route.ipv6_dip3 =
				foe->ipv6_3t_route.ipv6_dip3;

			if (IS_IPV6_3T_ROUTE(foe)) {
				entry.ipv6_3t_route.prot =
					foe->ipv6_3t_route.prot;
				entry.ipv6_3t_route.hph =
					foe->ipv6_3t_route.hph;
			}

			if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.sport =
					foe->ipv6_5t_route.sport;
				entry.ipv6_5t_route.dport =
					foe->ipv6_5t_route.dport;
			}
			/* DSCP = traffic class (priority nibble + top of
			 * flow label byte)
			 */
			entry.ipv6_5t_route.iblk2.dscp =
				(ip6h->priority << 4 |
				 (ip6h->flow_lbl[0] >> 4));
			break;

		case NEXTHDR_IPIP:
			/* IPv4-in-IPv6 upstream: DS-Lite when MAP-E is
			 * off, MAP-E otherwise
			 */
			if ((!mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_DSLITE) ||
			    (mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_MAP_E)) {
				/* DS-Lite LAN->WAN */
				entry.ipv4_dslite.bfib1.udp =
					foe->ipv4_dslite.bfib1.udp;
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

				/* tunnel endpoints from the outer IPv6
				 * header of this packet
				 */
				entry.ipv4_dslite.tunnel_sipv6_0 =
					ntohl(ip6h->saddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_sipv6_1 =
					ntohl(ip6h->saddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_sipv6_2 =
					ntohl(ip6h->saddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_sipv6_3 =
					ntohl(ip6h->saddr.s6_addr32[3]);

				entry.ipv4_dslite.tunnel_dipv6_0 =
					ntohl(ip6h->daddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_dipv6_1 =
					ntohl(ip6h->daddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_dipv6_2 =
					ntohl(ip6h->daddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_dipv6_3 =
					ntohl(ip6h->daddr.s6_addr32[3]);

				ppe_fill_flow_lbl(&entry, ip6h);

				entry.ipv4_dslite.priority = ip6h->priority;
				entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;
			} else if (mape_toggle &&
				   entry.bfib1.pkt_type == IPV4_HNAPT) {
				/* MapE LAN -> WAN */
				mape = 1;
				entry.ipv4_hnapt.iblk2.dscp =
					foe->ipv4_hnapt.iblk2.dscp;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				if (IS_GMAC1_MODE)
					entry.ipv4_hnapt.vlan1 = 1;
				else
					entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip =
					foe->ipv4_hnapt.new_sip;
				entry.ipv4_hnapt.new_dip =
					foe->ipv4_hnapt.new_dip;
				entry.ipv4_hnapt.etype = htons(ETH_P_IP);

				if (IS_HQOS_MODE) {
					/* queue id width: 7 bits on V4,
					 * 4 bits on older PPE
					 */
					entry.ipv4_hnapt.iblk2.qid =
						(hnat_priv->data->version == MTK_HNAT_V4) ?
						 skb->mark & 0x7f : skb->mark & 0xf;
					entry.ipv4_hnapt.iblk2.fqos = 1;
				}

				entry.ipv4_hnapt.bfib1.udp =
					foe->ipv4_hnapt.bfib1.udp;

				entry.ipv4_hnapt.new_sport =
					foe->ipv4_hnapt.new_sport;
				entry.ipv4_hnapt.new_dport =
					foe->ipv4_hnapt.new_dport;
				/* stash the outer IPv6 header for the
				 * software MAP-E fast path
				 */
				mape_l2w_v6h = *ip6h;
			}
			break;

		default:
			return -1;
		}

		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, ip6h, skb->len,
			skb->data_len);
		break;

	default:
		/* non-IP ethertype: only 6RD LAN->WAN (IPv6 already
		 * encapsulated in IPv4) is bindable here
		 */
		iph = ip_hdr(skb);
		switch (entry.bfib1.pkt_type) {
		case IPV6_6RD: /* 6RD LAN->WAN */
			entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
			entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
			entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
			entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;

			entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
			entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
			entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
			entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;

			entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
			entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
			entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
			entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
			entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
			entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
			entry.ipv6_6rd.ttl = iph->ttl;
			entry.ipv6_6rd.dscp = iph->tos;
			entry.ipv6_6rd.per_flow_6rd_id = 1;
			entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_6rd.iblk2.mibf = 1;
			break;

		default:
			return -1;
		}
	}

	/* Fill Layer2 Info.*/
	entry = ppe_fill_L2_info(eth, entry, hw_path);

	/* Fill Info Blk*/
	entry = ppe_fill_info_blk(eth, entry, hw_path);

	/* Resolve the egress PPE port (gmac) from the output device */
	if (IS_LAN(dev)) {
		if (IS_DSA_LAN(dev))
			port_id = hnat_dsa_fill_stag(dev, &entry, hw_path,
						     ntohs(eth->h_proto),
						     mape);

		if (IS_BOND_MODE)
			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
				 NR_GMAC2_PORT : NR_GMAC1_PORT;
		else
			gmac = NR_GMAC1_PORT;
	} else if (IS_WAN(dev)) {
		if (IS_DSA_WAN(dev))
			port_id = hnat_dsa_fill_stag(dev,&entry, hw_path,
						     ntohs(eth->h_proto),
						     mape);
		if (mape_toggle && mape == 1) {
			/* MAP-E upstream goes via CPU for software encap */
			gmac = NR_PDMA_PORT;
			/* Set act_dp = wan_dev */
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		} else {
			gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
		}
	} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN(skb) ||
		   FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
		/* extension (e.g. Wi-Fi) interface: send to CPU, tag the
		 * source with a VLAN when no warp-HNAT is available
		 */
		if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
			entry.bfib1.vpm = 1;
			entry.bfib1.vlan_layer = 1;

			if (FROM_GE_LAN(skb))
				entry.ipv4_hnapt.vlan1 = 1;
			else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
				entry.ipv4_hnapt.vlan1 = 2;
		}

		trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
			     skb_hnat_iface(skb), dev->name);
		/* To CPU then stolen by pre-routing hant hook of LAN/WAN
		 * Current setting is PDMA RX.
		 */
		gmac = NR_PDMA_PORT;
		if (IS_IPV4_GRP(foe))
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		else
			entry.ipv6_5t_route.act_dp = dev->ifindex;
	} else {
		printk_ratelimited(KERN_WARNING
					"Unknown case of dp, iif=%x --> %s\n",
					skb_hnat_iface(skb), dev->name);

		return 0;
	}

	/* QoS queue selection */
	if (IS_HQOS_MODE)
		qid = skb->mark & (MTK_QDMA_TX_MASK);
	else if (IS_PPPQ_MODE && (IS_DSA_LAN(dev) || IS_DSA_WAN(dev)))
		qid = port_id & MTK_QDMA_TX_MASK;
	else
		qid = 0;

	if (IS_IPV4_GRP(foe)) {
		entry.ipv4_hnapt.iblk2.dp = gmac;
		entry.ipv4_hnapt.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;

		if (qos_toggle) {
			if (hnat_priv->data->version == MTK_HNAT_V4) {
				entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
			} else {
				/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
				entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
				if (hnat_priv->data->version != MTK_HNAT_V1)
					entry.ipv4_hnapt.iblk2.port_mg |=
						((qid >> 4) & 0x3);

				/* embed the FOE index in a magic VLAN so the
				 * QoS path can recover it
				 */
				if (((IS_EXT(dev) && (FROM_GE_LAN(skb) ||
				      FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
				     ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
				    (!whnat)) {
					entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
					entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
					entry.bfib1.vlan_layer = 1;
				}
			}

			if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT ||
			    (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
				entry.ipv4_hnapt.iblk2.fqos = 0;
			else
				entry.ipv4_hnapt.iblk2.fqos = 1;
		} else {
			entry.ipv4_hnapt.iblk2.fqos = 0;
		}
	} else {
		entry.ipv6_5t_route.iblk2.dp = gmac;
		entry.ipv6_5t_route.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;

		if (qos_toggle) {
			if (hnat_priv->data->version == MTK_HNAT_V4) {
				entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
			} else {
				/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
				entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
				if (hnat_priv->data->version != MTK_HNAT_V1)
					entry.ipv6_5t_route.iblk2.port_mg |=
						((qid >> 4) & 0x3);

				if (IS_EXT(dev) && (FROM_GE_LAN(skb) ||
				    FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
				    (!whnat)) {
					entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
					entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
					entry.bfib1.vlan_layer = 1;
				}
			}

			if (FROM_EXT(skb) ||
			    (IS_PPPQ_MODE && !IS_DSA_LAN(dev) && !IS_DSA_WAN(dev)))
				entry.ipv6_5t_route.iblk2.fqos = 0;
			else
				entry.ipv6_5t_route.iblk2.fqos = 1;
		} else {
			entry.ipv6_5t_route.iblk2.fqos = 0;
		}
	}

	/* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined
	 * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and
	 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
	 */
	if (!whnat)
		entry.bfib1.state = BIND;

	/* order all field writes before the PPE can see the BIND state */
	wmb();
	memcpy(foe, &entry, sizeof(entry));
	/*reset statistic for this entry*/
	if (hnat_priv->data->per_flow_accounting)
		memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
		       0, sizeof(struct mib_entry));

	skb_hnat_filled(skb) = HNAT_INFO_FILLED;

	return 0;
}
1573
/* mtk_sw_nat_hook_tx - called from the Wi-Fi (warp/WDMA) transmit path to
 * finalize a FOE entry that skb_to_hnat_info() prepared but left unbound.
 *
 * Fills in the Wi-Fi-specific fields (bssid/wcid/rxid/wdmaid), the egress
 * port @gmac_no, VLAN handling, then flips the entry to BIND state.
 *
 * Always returns NF_ACCEPT; the packet itself is never consumed here.
 */
int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
{
	struct foe_entry *entry;
	struct ethhdr *eth;
	/* staged copy of the bind-info block: written back last, after a
	 * wmb(), so the PPE never sees a half-updated BIND entry
	 */
	struct hnat_bind_info_blk bfib1_tx;

	if (skb_hnat_alg(skb) || !is_hnat_info_filled(skb) ||
	    !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
		return NF_ACCEPT;

	trace_printk(
		"[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
		__func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
		skb_hnat_wdma_id(skb), skb_hnat_bss_id(skb),
		skb_hnat_wc_id(skb), skb_hnat_rx_id(skb));

	/* only WDMA/warp egress ports are handled here */
	if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) &&
	    (gmac_no != NR_WHNAT_WDMA_PORT))
		return NF_ACCEPT;

	if (!skb_hnat_is_hashed(skb))
		return NF_ACCEPT;

	/* bounds-check the FOE index carried in the skb headroom */
	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (entry_hnat_is_bound(entry))
		return NF_ACCEPT;

	if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
		return NF_ACCEPT;

	eth = eth_hdr(skb);
	memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));

	/*not bind multicast if PPE mcast not enable*/
	if (!hnat_priv->data->mcast) {
		if (is_multicast_ether_addr(eth->h_dest))
			return NF_ACCEPT;

		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.iblk2.mcast = 0;
		else
			entry->ipv6_5t_route.iblk2.mcast = 0;
	}

	/* Some mt_wifi virtual interfaces, such as apcli,
	 * will change the smac for specail purpose.
	 */
	switch (bfib1_tx.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		break;
	}

	/* mirror the skb's VLAN state into the entry */
	if (skb->vlan_tci) {
		bfib1_tx.vlan_layer = 1;
		bfib1_tx.vpm = 1;
		if (IS_IPV4_GRP(entry)) {
			entry->ipv4_hnapt.etype = htons(ETH_P_8021Q);
			entry->ipv4_hnapt.vlan1 = skb->vlan_tci;
		} else if (IS_IPV6_GRP(entry)) {
			entry->ipv6_5t_route.etype = htons(ETH_P_8021Q);
			entry->ipv6_5t_route.vlan1 = skb->vlan_tci;
		}
	} else {
		bfib1_tx.vpm = 0;
		bfib1_tx.vlan_layer = 0;
	}

	/* MT7622 wifi hw_nat not support QoS */
	if (IS_IPV4_GRP(entry)) {
		entry->ipv4_hnapt.iblk2.fqos = 0;
		if ((hnat_priv->data->version == MTK_HNAT_V2 &&
		     gmac_no == NR_WHNAT_WDMA_PORT) ||
		    (hnat_priv->data->version == MTK_HNAT_V4 &&
		     (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
			/* hardware warp path: record Wi-Fi target info */
			entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
			entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
			entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv4_hnapt.iblk2.winfoi = 1;
#else
			entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
			entry->ipv4_hnapt.iblk2w.winfoi = 1;
			entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
		} else {
			/* software path: tag source side with VLAN 1/2 */
			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
				bfib1_tx.vpm = 1;
				bfib1_tx.vlan_layer = 1;

				if (FROM_GE_LAN(skb))
					entry->ipv4_hnapt.vlan1 = 1;
				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
					entry->ipv4_hnapt.vlan1 = 2;
			}

			if (IS_HQOS_MODE &&
			    (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
				/* embed the FOE index in a magic VLAN for
				 * the HQoS path
				 */
				bfib1_tx.vpm = 0;
				bfib1_tx.vlan_layer = 1;
				entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
				entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
				entry->ipv4_hnapt.iblk2.fqos = 1;
			}
		}
		entry->ipv4_hnapt.iblk2.dp = gmac_no;
	} else {
		/* IPv6 family: same logic as above on the ipv6 layout */
		entry->ipv6_5t_route.iblk2.fqos = 0;
		if ((hnat_priv->data->version == MTK_HNAT_V2 &&
		     gmac_no == NR_WHNAT_WDMA_PORT) ||
		    (hnat_priv->data->version == MTK_HNAT_V4 &&
		     (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
			entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
			entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
			entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv6_5t_route.iblk2.winfoi = 1;
#else
			entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
			entry->ipv6_5t_route.iblk2w.winfoi = 1;
			entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
		} else {
			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
				bfib1_tx.vpm = 1;
				bfib1_tx.vlan_layer = 1;

				if (FROM_GE_LAN(skb))
					entry->ipv6_5t_route.vlan1 = 1;
				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
					entry->ipv6_5t_route.vlan1 = 2;
			}

			if (IS_HQOS_MODE &&
			    (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
				bfib1_tx.vpm = 0;
				bfib1_tx.vlan_layer = 1;
				entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
				entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
				entry->ipv6_5t_route.iblk2.fqos = 1;
			}
		}
		entry->ipv6_5t_route.iblk2.dp = gmac_no;
	}

	bfib1_tx.state = BIND;
	/* make all entry fields visible before the state flips to BIND */
	wmb();
	memcpy(&entry->bfib1, &bfib1_tx, sizeof(bfib1_tx));

	return NF_ACCEPT;
}
1739
1740int mtk_sw_nat_hook_rx(struct sk_buff *skb)
1741{
developer99506e52021-06-30 22:03:02 +08001742 if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
1743 skb_hnat_magic_tag(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001744 return NF_ACCEPT;
developer99506e52021-06-30 22:03:02 +08001745 }
developerfd40db22021-04-29 10:08:25 +08001746
1747 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001748 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001749 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1750
1751 if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
1752 skb_hnat_sport(skb) = NR_WDMA0_PORT;
1753 else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
1754 skb_hnat_sport(skb) = NR_WDMA1_PORT;
1755
1756 return NF_ACCEPT;
1757}
1758
1759void mtk_ppe_dev_register_hook(struct net_device *dev)
1760{
1761 int i, number = 0;
1762 struct extdev_entry *ext_entry;
1763
developerfd40db22021-04-29 10:08:25 +08001764 for (i = 1; i < MAX_IF_NUM; i++) {
1765 if (hnat_priv->wifi_hook_if[i] == dev) {
1766 pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
1767 __func__, dev->name, i);
1768 return;
1769 }
1770 if (!hnat_priv->wifi_hook_if[i]) {
1771 if (find_extif_from_devname(dev->name)) {
1772 extif_set_dev(dev);
1773 goto add_wifi_hook_if;
1774 }
1775
1776 number = get_ext_device_number();
1777 if (number >= MAX_EXT_DEVS) {
1778 pr_info("%s : extdev array is full. %s is not registered\n",
1779 __func__, dev->name);
1780 return;
1781 }
1782
1783 ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
1784 if (!ext_entry)
1785 return;
1786
developer4c32b7a2021-11-13 16:46:43 +08001787 strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
developerfd40db22021-04-29 10:08:25 +08001788 dev_hold(dev);
1789 ext_entry->dev = dev;
1790 ext_if_add(ext_entry);
1791
1792add_wifi_hook_if:
1793 dev_hold(dev);
1794 hnat_priv->wifi_hook_if[i] = dev;
1795
1796 break;
1797 }
1798 }
1799 pr_info("%s : ineterface %s register (%d)\n", __func__, dev->name, i);
1800}
1801
1802void mtk_ppe_dev_unregister_hook(struct net_device *dev)
1803{
1804 int i;
1805
1806 for (i = 1; i < MAX_IF_NUM; i++) {
1807 if (hnat_priv->wifi_hook_if[i] == dev) {
1808 hnat_priv->wifi_hook_if[i] = NULL;
1809 dev_put(dev);
1810
1811 break;
1812 }
1813 }
1814
1815 extif_put_dev(dev);
1816 pr_info("%s : ineterface %s set null (%d)\n", __func__, dev->name, i);
1817}
1818
1819static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
1820{
1821 struct dst_entry *dst;
1822 struct nf_conn *ct;
1823 enum ip_conntrack_info ctinfo;
1824 const struct nf_conn_help *help;
1825
1826 /* Do not accelerate 1st round of xfrm flow, and 2nd round of xfrm flow
1827 * is from local_out which is also filtered in sanity check.
1828 */
1829 dst = skb_dst(skb);
1830 if (dst && dst_xfrm(dst))
1831 return 0;
1832
1833 ct = nf_ct_get(skb, &ctinfo);
1834 if (!ct)
1835 return 1;
1836
1837 /* rcu_read_lock()ed by nf_hook_slow */
1838 help = nfct_help(ct);
1839 if (help && rcu_dereference(help->helper))
1840 return 0;
1841
1842 return 1;
1843}
1844
developer6f4a0c72021-10-19 10:04:22 +08001845static void mtk_hnat_dscp_update(struct sk_buff *skb, struct foe_entry *entry)
1846{
1847 struct iphdr *iph;
1848 struct ethhdr *eth;
1849 struct ipv6hdr *ip6h;
1850 bool flag = false;
1851
1852 eth = eth_hdr(skb);
1853 switch (ntohs(eth->h_proto)) {
1854 case ETH_P_IP:
1855 iph = ip_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08001856 if (IS_IPV4_GRP(entry) && entry->ipv4_hnapt.iblk2.dscp != iph->tos)
developer6f4a0c72021-10-19 10:04:22 +08001857 flag = true;
1858 break;
1859 case ETH_P_IPV6:
1860 ip6h = ipv6_hdr(skb);
developer001e7be2021-12-09 15:00:27 +08001861 if ((IS_IPV6_3T_ROUTE(entry) || IS_IPV6_5T_ROUTE(entry)) &&
1862 (entry->ipv6_5t_route.iblk2.dscp !=
1863 (ip6h->priority << 4 | (ip6h->flow_lbl[0] >> 4))))
developer6f4a0c72021-10-19 10:04:22 +08001864 flag = true;
1865 break;
1866 default:
1867 return;
1868 }
1869
1870 if (flag) {
developer1080dd82022-03-07 19:31:04 +08001871 if (debug_level >= 2)
1872 pr_info("Delete entry idx=%d.\n", skb_hnat_entry(skb));
developer6f4a0c72021-10-19 10:04:22 +08001873 memset(entry, 0, sizeof(struct foe_entry));
1874 hnat_cache_ebl(1);
1875 }
1876}
1877
developer30a47682021-11-02 17:06:14 +08001878static void mtk_hnat_nf_update(struct sk_buff *skb)
1879{
1880 struct nf_conn *ct;
1881 struct nf_conn_acct *acct;
1882 struct nf_conn_counter *counter;
1883 enum ip_conntrack_info ctinfo;
1884 struct hnat_accounting diff;
1885
1886 ct = nf_ct_get(skb, &ctinfo);
1887 if (ct) {
1888 if (!hnat_get_count(hnat_priv, skb_hnat_ppe(skb), skb_hnat_entry(skb), &diff))
1889 return;
1890
1891 acct = nf_conn_acct_find(ct);
1892 if (acct) {
1893 counter = acct->counter;
1894 atomic64_add(diff.packets, &counter[CTINFO2DIR(ctinfo)].packets);
1895 atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes);
1896 }
1897 }
1898}
1899
developerfd40db22021-04-29 10:08:25 +08001900static unsigned int mtk_hnat_nf_post_routing(
1901 struct sk_buff *skb, const struct net_device *out,
1902 unsigned int (*fn)(struct sk_buff *, const struct net_device *,
1903 struct flow_offload_hw_path *),
1904 const char *func)
1905{
1906 struct foe_entry *entry;
1907 struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
developere5763512021-05-21 01:04:58 +08001908 .virt_dev = (struct net_device*)out };
developerfd40db22021-04-29 10:08:25 +08001909 const struct net_device *arp_dev = out;
1910
1911 if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
1912 !IS_SPACE_AVAILABLE_HEAD(skb)))
1913 return 0;
1914
1915 if (unlikely(!skb_hnat_is_hashed(skb)))
1916 return 0;
1917
1918 if (out->netdev_ops->ndo_flow_offload_check) {
developere5763512021-05-21 01:04:58 +08001919 out->netdev_ops->ndo_flow_offload_check(&hw_path);
developerfd40db22021-04-29 10:08:25 +08001920 out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
1921 }
1922
1923 if (!IS_LAN(out) && !IS_WAN(out) && !IS_EXT(out))
1924 return 0;
1925
1926 trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
1927 skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
1928
developer471f6562021-05-10 20:48:34 +08001929 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08001930
1931 switch (skb_hnat_reason(skb)) {
1932 case HIT_UNBIND_RATE_REACH:
1933 if (entry_hnat_is_bound(entry))
1934 break;
1935
1936 if (fn && !mtk_hnat_accel_type(skb))
1937 break;
1938
1939 if (fn && fn(skb, arp_dev, &hw_path))
1940 break;
1941
1942 skb_to_hnat_info(skb, out, entry, &hw_path);
1943 break;
1944 case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
developer30a47682021-11-02 17:06:14 +08001945 /* update hnat count to nf_conntrack by keepalive */
1946 if (hnat_priv->data->per_flow_accounting && hnat_priv->nf_stat_en)
1947 mtk_hnat_nf_update(skb);
1948
developerfd40db22021-04-29 10:08:25 +08001949 if (fn && !mtk_hnat_accel_type(skb))
1950 break;
1951
developer6f4a0c72021-10-19 10:04:22 +08001952 /* update dscp for qos */
1953 mtk_hnat_dscp_update(skb, entry);
1954
developerfd40db22021-04-29 10:08:25 +08001955 /* update mcast timestamp*/
1956 if (hnat_priv->data->version == MTK_HNAT_V3 &&
1957 hnat_priv->data->mcast && entry->bfib1.sta == 1)
1958 entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
1959
1960 if (entry_hnat_is_bound(entry)) {
1961 memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
1962
1963 return -1;
1964 }
1965 break;
1966 case HIT_BIND_MULTICAST_TO_CPU:
1967 case HIT_BIND_MULTICAST_TO_GMAC_CPU:
1968 /*do not forward to gdma again,if ppe already done it*/
1969 if (IS_LAN(out) || IS_WAN(out))
1970 return -1;
1971 break;
1972 }
1973
1974 return 0;
1975}
1976
/* IPv6 LOCAL_OUT hook: for locally generated IPv6-in-IPv6 traffic
 * (NEXTHDR_IPIP), pre-populate the FOE entry for Map-E (when mape_toggle
 * is set) or mark it DS-Lite, before the post-routing stage binds it.
 * Always returns NF_ACCEPT; on malformed headers the entry is simply
 * left untouched.
 */
static unsigned int
mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
			   const struct nf_hook_state *state)
{
	struct foe_entry *entry;
	struct ipv6hdr *ip6h;
	struct iphdr _iphdr;
	const struct iphdr *iph;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	int udp = 0;

	if (unlikely(!skb_hnat_is_hashed(skb)))
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
		ip6h = ipv6_hdr(skb);
		if (ip6h->nexthdr == NEXTHDR_IPIP) {
			/* Map-E LAN->WAN: need to record orig info before fn. */
			if (mape_toggle) {
				/* Inner IPv4 header sits right after the
				 * fixed IPv6 header. */
				iph = skb_header_pointer(skb, IPV6_HDR_LEN,
							 sizeof(_iphdr), &_iphdr);
				if (unlikely(!iph))
					return NF_ACCEPT;

				switch (iph->protocol) {
				case IPPROTO_UDP:
					udp = 1;
					/* fallthrough: UDP shares the TCP path */
				case IPPROTO_TCP:
					break;

				default:
					/* only TCP/UDP flows are offloaded */
					return NF_ACCEPT;
				}

				/* Inner L4 ports, after the inner IPv4 header
				 * (ihl is in 32-bit words). */
				pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
							  sizeof(_ports), &_ports);
				if (unlikely(!pptr))
					return NF_ACCEPT;

				entry->bfib1.udp = udp;

				/* Record the inner (pre-encapsulation) IPv4
				 * tuple; NETSYS v2 has a dedicated Map-E
				 * packet type, older silicon reuses the
				 * HNAPT layout. */
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
				entry->bfib1.pkt_type = IPV4_MAP_E;
				entry->ipv4_dslite.iblk2.dscp = iph->tos;
				entry->ipv4_dslite.new_sip = ntohl(iph->saddr);
				entry->ipv4_dslite.new_dip = ntohl(iph->daddr);
				entry->ipv4_dslite.new_sport = ntohs(pptr->src);
				entry->ipv4_dslite.new_dport = ntohs(pptr->dst);
#else
				entry->ipv4_hnapt.iblk2.dscp = iph->tos;
				entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
				entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
#endif
			} else {
				entry->bfib1.pkt_type = IPV4_DSLITE;
			}
		}
	}
	return NF_ACCEPT;
}
2041
2042static unsigned int
2043mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
2044 const struct nf_hook_state *state)
2045{
2046 post_routing_print(skb, state->in, state->out, __func__);
2047
2048 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
2049 __func__))
2050 return NF_ACCEPT;
2051
2052 trace_printk(
2053 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2054 __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
2055 skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
2056 skb_hnat_alg(skb));
2057
2058 return NF_DROP;
2059}
2060
2061static unsigned int
2062mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
2063 const struct nf_hook_state *state)
2064{
2065 post_routing_print(skb, state->in, state->out, __func__);
2066
2067 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
2068 __func__))
2069 return NF_ACCEPT;
2070
2071 trace_printk(
2072 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2073 __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
2074 skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
2075 skb_hnat_alg(skb));
2076
2077 return NF_DROP;
2078}
2079
2080static unsigned int
2081mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
2082 const struct nf_hook_state *state)
2083{
developerfd40db22021-04-29 10:08:25 +08002084 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2085
developer34028fb2022-01-11 13:51:29 +08002086 if (IS_HQOS_MODE && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
developerfd40db22021-04-29 10:08:25 +08002087 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2088 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2089 }
developerfd40db22021-04-29 10:08:25 +08002090
2091 if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
2092 clr_from_extge(skb);
2093
2094 /* packets from external devices -> xxx ,step 2, learning stage */
developeraf07fad2021-11-19 17:53:42 +08002095 if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
2096 (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
developerfd40db22021-04-29 10:08:25 +08002097 if (!do_hnat_ext_to_ge2(skb, __func__))
2098 return NF_STOLEN;
2099 goto drop;
2100 }
2101
2102 /* packets form ge -> external device */
2103 if (do_ge2ext_fast(state->in, skb)) {
2104 if (!do_hnat_ge_to_ext(skb, __func__))
2105 return NF_STOLEN;
2106 goto drop;
2107 }
2108
2109 return NF_ACCEPT;
2110drop:
2111 printk_ratelimited(KERN_WARNING
2112 "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2113 __func__, state->in->name, skb_hnat_iface(skb),
2114 HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
2115 skb_hnat_sport(skb), skb_hnat_reason(skb),
2116 skb_hnat_alg(skb));
2117
2118 return NF_DROP;
2119}
2120
2121static unsigned int
2122mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
2123 const struct nf_hook_state *state)
2124{
2125 post_routing_print(skb, state->in, state->out, __func__);
2126
2127 if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__))
2128 return NF_ACCEPT;
2129
2130 trace_printk(
2131 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2132 __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
2133 skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
2134 skb_hnat_alg(skb));
2135
2136 return NF_DROP;
2137}
2138
/* IPv4 LOCAL_OUT hook: ensure locally generated flows are not bound by
 * the PPE. Packets without enough headroom for the FOE metadata are
 * re-allocated; 6rd (IPv6-in-IPv4) packets additionally get their entry
 * type forced to IPV6_6RD.
 *
 * NOTE(review): on the realloc path, `skb = new_skb` only updates the
 * local pointer — presumably hnat_set_head_frags() re-attaches the new
 * skb via @state; verify the netfilter core does not keep using the
 * freed original.
 */
static unsigned int
mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
			   const struct nf_hook_state *state)
{
	struct sk_buff *new_skb;
	struct foe_entry *entry;
	struct iphdr *iph;

	if (!skb_hnat_is_hashed(skb))
		return NF_ACCEPT;

	/* entry points into the FOE table (not into the skb), so it stays
	 * valid across the headroom re-allocation below. */
	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	/* Grow the headroom if it cannot hold the HNAT metadata. */
	if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
		new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
		if (!new_skb) {
			dev_info(hnat_priv->dev, "%s:drop\n", __func__);
			return NF_DROP;
		}
		dev_kfree_skb(skb);
		skb = new_skb;
	}

	/* Make the flow from local not be bound. */
	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_IPV6) {
		/* 6rd tunnel: IPv6 carried in IPv4 (protocol 41). */
		entry->udib1.pkt_type = IPV6_6RD;
		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
	} else {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
	}

	return NF_ACCEPT;
}
2173
2174static unsigned int mtk_hnat_br_nf_forward(void *priv,
2175 struct sk_buff *skb,
2176 const struct nf_hook_state *state)
2177{
developer99506e52021-06-30 22:03:02 +08002178 if ((hnat_priv->data->version == MTK_HNAT_V2) &&
2179 unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
developerfd40db22021-04-29 10:08:25 +08002180 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2181
2182 return NF_ACCEPT;
2183}
2184
/* Netfilter hook table for HNAT: IPv4/IPv6 pre-routing, post-routing
 * and local-out hooks plus bridge-family hooks. Some entries are
 * re-targeted at runtime by whnat_adjust_nf_hooks() for the WHNAT mode.
 */
static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
	/* L3 pre-routing: run just after the first hook so HNAT sees
	 * packets before most other users. */
	{
		.hook = mtk_hnat_ipv4_nf_pre_routing,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST + 1,
	},
	{
		.hook = mtk_hnat_ipv6_nf_pre_routing,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST + 1,
	},
	/* L3 post-routing / local-out: run last, after NAT and mangle
	 * have finalized the packet. */
	{
		.hook = mtk_hnat_ipv6_nf_post_routing,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv6_nf_local_out,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv4_nf_post_routing,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv4_nf_local_out,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_LAST,
	},
	/* Bridge hooks; hooknum/priority may be rewritten by
	 * whnat_adjust_nf_hooks(). */
	{
		.hook = mtk_hnat_br_nf_local_in,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_FIRST,
	},
	{
		.hook = mtk_hnat_br_nf_local_out,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_OUT,
		.priority = NF_BR_PRI_LAST - 1,
	},
	{
		.hook = mtk_pong_hqos_handler,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_FIRST + 1,
	},
};
2241
2242int hnat_register_nf_hooks(void)
2243{
2244 return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2245}
2246
2247void hnat_unregister_nf_hooks(void)
2248{
2249 nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2250}
2251
2252int whnat_adjust_nf_hooks(void)
2253{
2254 struct nf_hook_ops *hook = mtk_hnat_nf_ops;
2255 unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);
2256
developerfd40db22021-04-29 10:08:25 +08002257 while (n-- > 0) {
2258 if (hook[n].hook == mtk_hnat_br_nf_local_in) {
2259 hook[n].hooknum = NF_BR_PRE_ROUTING;
developer2b85f652021-08-19 16:09:50 +08002260 hook[n].priority = NF_BR_PRI_FIRST + 1;
developerfd40db22021-04-29 10:08:25 +08002261 } else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
2262 hook[n].hooknum = NF_BR_POST_ROUTING;
2263 } else if (hook[n].hook == mtk_pong_hqos_handler) {
2264 hook[n].hook = mtk_hnat_br_nf_forward;
2265 hook[n].hooknum = NF_BR_FORWARD;
2266 hook[n].priority = NF_BR_PRI_LAST - 1;
2267 }
2268 }
2269
2270 return 0;
2271}
2272
developerfd40db22021-04-29 10:08:25 +08002273int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
2274 struct packet_type *pt, struct net_device *unused)
2275{
2276 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2277
2278 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2279 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2280
2281 do_hnat_ge_to_ext(skb, __func__);
2282
2283 return 0;
2284}
developerfd40db22021-04-29 10:08:25 +08002285