/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
 */
13
14#include <linux/netfilter_bridge.h>
15#include <linux/netfilter_ipv6.h>
16
17#include <net/arp.h>
18#include <net/neighbour.h>
19#include <net/netfilter/nf_conntrack_helper.h>
20#include <net/netfilter/nf_flow_table.h>
21#include <net/ipv6.h>
22#include <net/ip6_route.h>
23#include <net/ip.h>
24#include <net/tcp.h>
25#include <net/udp.h>
developer30a47682021-11-02 17:06:14 +080026#include <net/netfilter/nf_conntrack.h>
27#include <net/netfilter/nf_conntrack_acct.h>
developerfd40db22021-04-29 10:08:25 +080028
29#include "nf_hnat_mtk.h"
30#include "hnat.h"
31
32#include "../mtk_eth_soc.h"
33
/* True when a packet received on a GE-side device (LAN/WAN/PPD) is
 * already hashed by the PPE and was explicitly bound to be forwarded
 * to the CPU, i.e. it can take the GE -> external-device fast path.
 */
#define do_ge2ext_fast(dev, skb)                                               \
	((IS_LAN(dev) || IS_WAN(dev) || IS_PPD(dev)) &&                        \
	 skb_hnat_is_hashed(skb) &&                                            \
	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
/* True for learning-stage traffic on the PPD device that came in over
 * PDMA/QDMA and whose VLAN tag encodes the ifindex of a known external
 * (or WAN) device — the tag was planted by do_hnat_ext_to_ge().
 */
#define do_ext2ge_fast_learn(dev, skb)                                         \
	(IS_PPD(dev) &&                                                        \
	 (skb_hnat_sport(skb) == NR_PDMA_PORT ||                               \
	  skb_hnat_sport(skb) == NR_QDMA_PORT) &&                              \
	 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) ||               \
	  get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
/* True when MAP-E is enabled and a WAN ingress packet has not already
 * done the MapE ping-pong (see do_hnat_mape_w2l_fast()).
 */
#define do_mape_w2l_fast(dev, skb)                                             \
	(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))

/* Cached IPv6 outer headers for MAP-E re-encapsulation:
 * l2w = LAN->WAN direction, w2l = WAN->LAN direction (captured from the
 * last decapsulated WAN packet in do_hnat_mape_w2l_fast()).
 */
static struct ipv6hdr mape_l2w_v6h;
static struct ipv6hdr mape_w2l_v6h;
49static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
50{
51 int i;
52
53 for (i = 1; i < MAX_IF_NUM; i++) {
54 if (hnat_priv->wifi_hook_if[i] == dev)
55 return i;
56 }
57
58 return 0;
59}
60
61static inline int get_ext_device_number(void)
62{
63 int i, number = 0;
64
65 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++)
66 number += 1;
67 return number;
68}
69
70static inline int find_extif_from_devname(const char *name)
71{
72 int i;
73 struct extdev_entry *ext_entry;
74
75 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
76 ext_entry = hnat_priv->ext_if[i];
77 if (!strcmp(name, ext_entry->name))
78 return 1;
79 }
80 return 0;
81}
82
83static inline int get_index_from_dev(const struct net_device *dev)
84{
85 int i;
86 struct extdev_entry *ext_entry;
87
88 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
89 ext_entry = hnat_priv->ext_if[i];
90 if (dev == ext_entry->dev)
91 return ext_entry->dev->ifindex;
92 }
93 return 0;
94}
95
96static inline struct net_device *get_dev_from_index(int index)
97{
98 int i;
99 struct extdev_entry *ext_entry;
100 struct net_device *dev = 0;
101
102 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
103 ext_entry = hnat_priv->ext_if[i];
104 if (ext_entry->dev && index == ext_entry->dev->ifindex) {
105 dev = ext_entry->dev;
106 break;
107 }
108 }
109 return dev;
110}
111
112static inline struct net_device *get_wandev_from_index(int index)
113{
developer8c9c0d02021-06-18 16:15:37 +0800114 if (!hnat_priv->g_wandev)
115 hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);
developerfd40db22021-04-29 10:08:25 +0800116
developer8c9c0d02021-06-18 16:15:37 +0800117 if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index)
118 return hnat_priv->g_wandev;
developerfd40db22021-04-29 10:08:25 +0800119 return NULL;
120}
121
122static inline int extif_set_dev(struct net_device *dev)
123{
124 int i;
125 struct extdev_entry *ext_entry;
126
127 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
128 ext_entry = hnat_priv->ext_if[i];
129 if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
130 dev_hold(dev);
131 ext_entry->dev = dev;
132 pr_info("%s(%s)\n", __func__, dev->name);
133
134 return ext_entry->dev->ifindex;
135 }
136 }
137
138 return -1;
139}
140
141static inline int extif_put_dev(struct net_device *dev)
142{
143 int i;
144 struct extdev_entry *ext_entry;
145
146 for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
147 ext_entry = hnat_priv->ext_if[i];
148 if (ext_entry->dev == dev) {
149 ext_entry->dev = NULL;
150 dev_put(dev);
151 pr_info("%s(%s)\n", __func__, dev->name);
152
developerbc53e5f2021-05-21 10:07:17 +0800153 return 0;
developerfd40db22021-04-29 10:08:25 +0800154 }
155 }
156
157 return -1;
158}
159
/* Append @ext_entry to the first free slot of the extension-interface
 * table and return the number of occupied slots afterwards.
 * NOTE(review): when the table is already full the entry is silently
 * dropped and MAX_EXT_DEVS is returned — callers cannot distinguish
 * "inserted into the last slot" from "table full"; confirm callers
 * tolerate this before relying on the return value.
 */
int ext_if_add(struct extdev_entry *ext_entry)
{
	int len = get_ext_device_number();

	if (len < MAX_EXT_DEVS)
		hnat_priv->ext_if[len++] = ext_entry;

	return len;
}
169
170int ext_if_del(struct extdev_entry *ext_entry)
171{
172 int i, j;
173
174 for (i = 0; i < MAX_EXT_DEVS; i++) {
175 if (hnat_priv->ext_if[i] == ext_entry) {
176 for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++)
177 hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
178 hnat_priv->ext_if[j] = NULL;
179 break;
180 }
181 }
182
183 return i;
184}
185
/* Invalidate every BOUND flow entry in all PPE tables when a relevant
 * device changes state, so no hardware path keeps forwarding through a
 * stale interface.  Only acts for LAN/WAN, registered extension
 * devices, or devices exposing ndo_flow_offload_check; otherwise it is
 * a no-op.  A timer restores normal SMA binding 3 s later.
 */
void foe_clear_all_bind_entries(struct net_device *dev)
{
	int i, hash_index;
	struct foe_entry *entry;

	if (!IS_LAN(dev) && !IS_WAN(dev) &&
	    !find_extif_from_devname(dev->name) &&
	    !dev->netdev_ops->ndo_flow_offload_check)
		return;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		/* Stop the PPE from creating new bindings while we sweep:
		 * semi-automatic mode forwards everything to the CPU.
		 */
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_ONLY_FWD_CPU);

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND) {
				entry->ipv4_hnapt.udib1.state = INVALID;
				/* Refresh the timestamp from the FE timer
				 * register (offset 0x10) so ageing restarts.
				 */
				entry->ipv4_hnapt.udib1.time_stamp =
					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
			}
		}
	}

	/* clear HWNAT cache */
	hnat_cache_ebl(1);

	/* Re-arm the timer that switches SMA back to auto-build mode. */
	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
}
215
216static void gmac_ppe_fwd_enable(struct net_device *dev)
217{
218 if (IS_LAN(dev) || IS_GMAC1_MODE)
219 set_gmac_ppe_fwd(0, 1);
220 else if (IS_WAN(dev))
221 set_gmac_ppe_fwd(1, 1);
222}
223
/* Netdevice notifier: keeps PPE forwarding, the extension-device table
 * and the cached PPD/WAN device references in sync with interface
 * lifecycle events.  Always returns NOTIFY_DONE.
 */
int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
			    void *ptr)
{
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		/* Enable PPE forwarding on the owning GMAC and bind the
		 * device into its extension slot (no-op if not an ext if).
		 */
		gmac_ppe_fwd_enable(dev);

		extif_set_dev(dev);

		break;
	case NETDEV_GOING_DOWN:
		/* Wifi-hooked interfaces are managed elsewhere; only
		 * release the extension binding for non-wifi devices.
		 */
		if (!get_wifi_hook_if_index_from_dev(dev))
			extif_put_dev(dev);

		foe_clear_all_bind_entries(dev);

		break;
	case NETDEV_UNREGISTER:
		/* Drop the references taken by dev_get_by_name() below. */
		if (IS_PPD(dev) && hnat_priv->g_ppdev) {
			hnat_priv->g_ppdev = NULL;
			dev_put(dev);
		}
		if (IS_WAN(dev) && hnat_priv->g_wandev) {
			hnat_priv->g_wandev = NULL;
			dev_put(dev);
		}

		break;
	case NETDEV_REGISTER:
		/* Cache PPD/WAN devices by name; each holds one reference. */
		if (IS_PPD(dev) && !hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
		if (IS_WAN(dev) && !hnat_priv->g_wandev)
			hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
269
270void foe_clear_entry(struct neighbour *neigh)
271{
272 u32 *daddr = (u32 *)neigh->primary_key;
273 unsigned char h_dest[ETH_ALEN];
274 struct foe_entry *entry;
developer471f6562021-05-10 20:48:34 +0800275 int i, hash_index;
developerfd40db22021-04-29 10:08:25 +0800276 u32 dip;
277
278 dip = (u32)(*daddr);
279
developer471f6562021-05-10 20:48:34 +0800280 for (i = 0; i < CFG_PPE_NUM; i++) {
281 for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
282 entry = hnat_priv->foe_table_cpu[i] + hash_index;
283 if (entry->bfib1.state == BIND &&
284 entry->ipv4_hnapt.new_dip == ntohl(dip)) {
285 *((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
286 *((u16 *)&h_dest[4]) =
287 swab16(entry->ipv4_hnapt.dmac_lo);
288 if (strncmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
289 pr_info("%s: state=%d\n", __func__,
290 neigh->nud_state);
291 cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
292 SMA, SMA_ONLY_FWD_CPU);
developerfd40db22021-04-29 10:08:25 +0800293
developer471f6562021-05-10 20:48:34 +0800294 entry->ipv4_hnapt.udib1.state = INVALID;
295 entry->ipv4_hnapt.udib1.time_stamp =
296 readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
developerfd40db22021-04-29 10:08:25 +0800297
developer471f6562021-05-10 20:48:34 +0800298 /* clear HWNAT cache */
299 hnat_cache_ebl(1);
developerfd40db22021-04-29 10:08:25 +0800300
developer471f6562021-05-10 20:48:34 +0800301 mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
302 jiffies + 3 * HZ);
developerfd40db22021-04-29 10:08:25 +0800303
developer471f6562021-05-10 20:48:34 +0800304 pr_info("Delete old entry: dip =%pI4\n", &dip);
305 pr_info("Old mac= %pM\n", h_dest);
306 pr_info("New mac= %pM\n", neigh->ha);
307 }
developerfd40db22021-04-29 10:08:25 +0800308 }
309 }
310 }
311}
312
313int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
314 void *ptr)
315{
316 struct net_device *dev = NULL;
317 struct neighbour *neigh = NULL;
318
319 switch (event) {
320 case NETEVENT_NEIGH_UPDATE:
321 neigh = ptr;
322 dev = neigh->dev;
323 if (dev)
324 foe_clear_entry(neigh);
325 break;
326 }
327
328 return NOTIFY_DONE;
329}
330
/* Re-encapsulate an IPv4 packet for MAP-E by inserting @mape_ip6h
 * between the Ethernet header and the IPv4 header in place.
 * Returns 0 on success, (unsigned)-1 when the skb lacks headroom or is
 * shared/cloned and not writable.
 * NOTE(review): the return type is unsigned int but -1 is returned on
 * failure; callers test with `!mape_add_ipv6_hdr(...)`, which works,
 * but an int return would be clearer — confirm before changing.
 */
unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
{
	struct ethhdr *eth = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr *iph = NULL;

	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		return -1;
	}

	/* point to L3 */
	/* Move the Ethernet header up by IPV6_HDR_LEN, then write the
	 * cached IPv6 header into the gap.  The two skb_push() calls
	 * together extend the data pointer by IPV6_HDR_LEN, leaving
	 * skb->data at the new IPv6 header (L3).
	 */
	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);

	/* The relocated Ethernet header now sits ETH_HLEN before data. */
	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	eth->h_proto = htons(ETH_P_IPV6);
	skb->protocol = htons(ETH_P_IPV6);

	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
	ip6h = (struct ipv6hdr *)(skb->data);
	/* NOTE(review): payload_len is set to the full IPv4 tot_len (both
	 * big-endian); for IPIP the IPv6 payload is the whole inner IPv4
	 * packet, so this appears intentional.
	 */
	ip6h->payload_len = iph->tot_len; /* maybe different with ipv4 */

	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
	return 0;
}
358
359static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
360 struct ethhdr *eth)
361{
362 skb->pkt_type = PACKET_HOST;
363 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
364 if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
365 skb->pkt_type = PACKET_BROADCAST;
366 else
367 skb->pkt_type = PACKET_MULTICAST;
368 }
369}
370
/* Learning-stage fast path: re-inject a packet received on an external
 * (e.g. wifi) device into the PPD device so the PPE can learn the flow.
 * The ingress ifindex is smuggled in a fake 802.1Q tag (CFI set) and
 * recovered later by do_ext2ge_fast_learn()/do_hnat_ext_to_ge2().
 * Returns 0 when the packet was consumed, (unsigned)-1 otherwise.
 */
unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
			       const char *func)
{
	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
		u16 vlan_id = 0;
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		set_to_ppe(skb);

		/* A real VLAN tag must be materialized into the frame
		 * before we overwrite the tag fields below.
		 */
		vlan_id = skb_vlan_tag_get_id(skb);
		if (vlan_id) {
			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
			if (!skb)
				return -1;
		}

		/*set where we come from*/
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
		trace_printk(
			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
			in->name, hnat_priv->g_ppdev->name);
		skb->dev = hnat_priv->g_ppdev;
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__, func);
		return 0;
	}

	trace_printk("%s: called from %s fail\n", __func__, func);
	return -1;
}
404
/* Learning-stage step 2: a packet previously injected into the PPD by
 * do_hnat_ext_to_ge() comes back here; recover the original external
 * device from the fake VLAN tag and hand the packet to the stack as if
 * received on it.  Falls back to the MAP-E WAN->LAN ping-pong when the
 * tag names the WAN device instead.  Returns 0 when consumed,
 * (unsigned)-1 on failure.
 */
unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct net_device *dev;
	struct foe_entry *entry;

	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
		     ntohs(skb->vlan_proto), skb->vlan_tci);

	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);

	if (dev) {
		/*set where we to go*/
		skb->dev = dev;
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;

		/* Strip any real VLAN tag re-inserted in step 1. */
		if (ntohs(eth->h_proto) == ETH_P_8021Q) {
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				return -1;
		}

		/* In bond mode, spread valid hashed flows across queues
		 * (0x7fff / 0x3fff are the "no entry" sentinels per HW
		 * version).
		 */
		if (IS_BOND_MODE &&
		    (((hnat_priv->data->version == MTK_HNAT_V4) &&
		      (skb_hnat_entry(skb) != 0x7fff)) ||
		     ((hnat_priv->data->version != MTK_HNAT_V4) &&
		      (skb_hnat_entry(skb) != 0x3fff))))
			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);

		set_from_extge(skb);
		fix_skb_packet_type(skb, skb->dev, eth);
		netif_rx(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		/* MapE WAN --> LAN/WLAN PingPong. */
		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
		if (mape_toggle && dev) {
			/* Re-add the cached outer IPv6 header and requeue
			 * toward the stack as an IPv4-HNAPT flow.
			 */
			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = dev;
				set_from_mape(skb);
				skb->vlan_proto = 0;
				skb->vlan_tci = 0;
				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
				entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
				entry->bfib1.pkt_type = IPV4_HNAPT;
				netif_rx(skb);
				return 0;
			}
		}
		trace_printk("%s: called from %s fail\n", __func__, func);
		return -1;
	}
}
462
/* Bound-flow fast path toward an external device: look up the egress
 * device recorded in the FOE entry (act_dp) and transmit the packet on
 * it directly.  Handles the HQoS magic-tag strip and the MAP-E
 * LAN->WAN re-encapsulation fallback.  If the recorded device is gone,
 * the PPE entry is invalidated so the hardware stops using it.
 * Returns 0 when consumed, (unsigned)-1 on failure.
 */
unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
{
	/*set where we to go*/
	u8 index;
	struct foe_entry *entry;
	struct net_device *dev;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	/* act_dp stores the destination (external) device index chosen
	 * at binding time; layout differs by IPv4/IPv6 entry type.
	 */
	if (IS_IPV4_GRP(entry))
		index = entry->ipv4_hnapt.act_dp;
	else
		index = entry->ipv6_5t_route.act_dp;

	skb->dev = get_dev_from_index(index);

	/* Strip the HQoS pseudo-VLAN tag inserted for QoS scheduling
	 * before handing the frame to the external device.
	 */
	if (qos_toggle && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NF_ACCEPT;

		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
			return NF_ACCEPT;

		skb_pull_rcsum(skb, VLAN_HLEN);

		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
			2 * ETH_ALEN);
	}

	if (skb->dev) {
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		if (mape_toggle) {
			/* Add ipv6 header mape for lan/wlan -->wan */
			dev = get_wandev_from_index(index);
			if (dev) {
				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
					skb_set_network_header(skb, 0);
					skb_push(skb, ETH_HLEN);
					skb_set_mac_header(skb, 0);
					skb->dev = dev;
					dev_queue_xmit(skb);
					return 0;
				}
				trace_printk("%s: called from %s fail[MapE]\n", __func__,
					     func);
				return -1;
			}
		}
	}
	/*if external devices is down, invalidate related ppe entry*/
	if (entry_hnat_is_bound(entry)) {
		entry->bfib1.state = INVALID;
		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.act_dp = 0;
		else
			entry->ipv6_5t_route.act_dp = 0;

		/* clear HWNAT cache */
		hnat_cache_ebl(1);
	}
	trace_printk("%s: called from %s fail, index=%x\n", __func__,
		     func, index);
	return -1;
}
534
/* Trace helper: dump the HNAT tag state of an skb at a pre-routing
 * hook (visible via trace_printk ring buffer only).
 */
static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
			      const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}
545
/* Trace helper: dump the HNAT tag state of an skb at a post-routing
 * hook (same fields as pre_routing_print).
 */
static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
			       const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}
556
/* Classify the ingress interface of @skb and record the matching
 * FOE_MAGIC_* tag in its headroom; @val is unused for this setter
 * (the shared hnat_set_head_frags() callback signature requires it).
 */
static inline void hnat_set_iif(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	/* Packets already tagged by the WED/warp path keep their tag. */
	if (IS_WHNAT(state->in) && FROM_WED(skb)) {
		return;
	} else if (IS_LAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
	} else if (IS_PPD(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
	} else if (IS_EXT(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
	} else if (IS_WAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
	} else if (!IS_BR(state->in)) {
		/* Unknown non-bridge device: offload-capable ones count
		 * as virtual GE; anything else is marked invalid and its
		 * stale headroom tag (if any) is wiped.
		 */
		if (state->in->netdev_ops->ndo_flow_offload_check) {
			skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
		} else {
			skb_hnat_iface(skb) = FOE_INVALID;

			if (is_magic_tag_valid(skb) &&
			    IS_SPACE_AVAILABLE_HEAD(skb))
				memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
		}
	}
}
582
/* Record the ALG flag @val in the skb's HNAT headroom tag; a non-zero
 * value excludes the flow from PPE acceleration.
 */
static inline void hnat_set_alg(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	skb_hnat_alg(skb) = val;
}
588
589static inline void hnat_set_head_frags(const struct nf_hook_state *state,
590 struct sk_buff *head_skb, int val,
591 void (*fn)(const struct nf_hook_state *state,
592 struct sk_buff *skb, int val))
593{
594 struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;
595
596 fn(state, head_skb, val);
597 while (segs) {
598 fn(state, segs, val);
599 segs = segs->next;
600 }
601}
602
/* MAP-E WAN -> LAN fast path: decapsulate an IPv6-in-IPv4 (IPIP)
 * packet in place, cache the outer IPv6 header for the reverse
 * direction, and ping-pong the now-IPv4 packet through the PPD device
 * so the PPE can learn it.  Returns 0 when consumed, (unsigned)-1 when
 * the packet is not an accelerable MAP-E TCP/UDP flow.
 */
unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
				   const char *func)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct iphdr _iphdr;
	struct iphdr *iph;
	struct ethhdr *eth;

	/* WAN -> LAN/WLAN MapE. */
	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
		if (unlikely(!iph))
			return -1;

		/* Only TCP/UDP inner flows are worth accelerating. */
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			break;
		default:
			return -1;
		}
		/* Cache the outer header for re-encapsulation later. */
		mape_w2l_v6h = *ip6h;

		/* Remove ipv6 header. */
		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
		       skb->data - ETH_HLEN, ETH_HLEN);
		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
		skb_set_mac_header(skb, 0);
		skb_set_network_header(skb, ETH_HLEN);
		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));

		eth = eth_hdr(skb);
		eth->h_proto = htons(ETH_P_IP);
		set_to_ppe(skb);

		/* Encode the ingress ifindex in a fake VLAN tag (CFI set),
		 * matching the do_hnat_ext_to_ge() convention.
		 */
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));

		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		skb->dev = hnat_priv->g_ppdev;
		skb->protocol = htons(ETH_P_IP);

		dev_queue_xmit(skb);

		return 0;
	}
	return -1;
}
654
655static unsigned int is_ppe_support_type(struct sk_buff *skb)
656{
657 struct ethhdr *eth = NULL;
658 struct iphdr *iph = NULL;
659 struct ipv6hdr *ip6h = NULL;
660 struct iphdr _iphdr;
661
662 eth = eth_hdr(skb);
developerfd2d7422021-06-09 17:09:39 +0800663 if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) ||
developer8116b0a2021-08-23 18:07:20 +0800664 is_multicast_ether_addr(eth->h_dest))
developerfd40db22021-04-29 10:08:25 +0800665 return 0;
666
667 switch (ntohs(skb->protocol)) {
668 case ETH_P_IP:
669 iph = ip_hdr(skb);
670
671 /* do not accelerate non tcp/udp traffic */
672 if ((iph->protocol == IPPROTO_TCP) ||
673 (iph->protocol == IPPROTO_UDP) ||
674 (iph->protocol == IPPROTO_IPV6)) {
675 return 1;
676 }
677
678 break;
679 case ETH_P_IPV6:
680 ip6h = ipv6_hdr(skb);
681
682 if ((ip6h->nexthdr == NEXTHDR_TCP) ||
683 (ip6h->nexthdr == NEXTHDR_UDP)) {
684 return 1;
685 } else if (ip6h->nexthdr == NEXTHDR_IPIP) {
686 iph = skb_header_pointer(skb, IPV6_HDR_LEN,
687 sizeof(_iphdr), &_iphdr);
developer4c32b7a2021-11-13 16:46:43 +0800688 if (unlikely(!iph))
689 return 0;
developerfd40db22021-04-29 10:08:25 +0800690
691 if ((iph->protocol == IPPROTO_TCP) ||
692 (iph->protocol == IPPROTO_UDP)) {
693 return 1;
694 }
695
696 }
697
698 break;
699 case ETH_P_8021Q:
700 return 1;
701 }
702
703 return 0;
704}
705
/* IPv6 PRE_ROUTING netfilter hook: tag the skb's ingress class and
 * short-circuit the ext<->GE and MAP-E fast paths.
 * Returns NF_ACCEPT to continue normal processing, NF_STOLEN when the
 * packet was consumed here, NF_DROP on a failed fast path.
 */
static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	/* Not accelerable: mark ALG on the head and every fragment so
	 * the PPE skips this flow entirely.
	 */
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	/* @val is unused by hnat_set_iif; -1 is a don't-care. */
	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	/* MapE need remove ipv6 header and pingpong. */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	if (is_from_mape(skb))
		clr_from_extge(skb);

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
759
/* IPv4 PRE_ROUTING netfilter hook: same flow as the IPv6 variant but
 * without the MAP-E ping-pong (which only applies to WAN IPv6 ingress).
 * Returns NF_ACCEPT / NF_STOLEN / NF_DROP as usual.
 */
static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	/* Not accelerable: flag ALG on head and fragments. */
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets form ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
802
/* Bridge LOCAL_IN netfilter hook: handles the HQoS tag decode, the
 * two-step external-device learning path, the GE->ext bound path and
 * the bridged MAP-E ping-pong.  Returns NF_ACCEPT / NF_STOLEN /
 * NF_DROP.
 */
static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct vlan_ethhdr *veth;

	/* HQoS scheduling encodes the FOE hash in a magic VLAN TCI;
	 * recover it and force the bind-to-CPU reason so the packet
	 * takes the GE->ext path below.
	 */
	if (qos_toggle && hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* Debug aid: count CPU reasons and dump matching packets. */
	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* packets from external devices -> xxx ,step 1 , learning stage & bound stage*/
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx ,step 2, learning stage */
		if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
		    (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets form ge -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

	/* MapE need remove ipv6 header and pingpong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
885
/* Resolve the IPv6 next-hop neighbour for @skb and rewrite the frame's
 * source/destination MACs for egress via @out.  For PPPoE paths the
 * MACs come from @hw_path instead.  Returns 0 on success,
 * (unsigned)-1 when no (valid) neighbour is available.
 */
static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	/* The noref neighbour lookup is only valid inside this RCU-BH
	 * critical section; all neigh->ha reads stay within it.
	 */
	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/*copy ether type for DS-Lite and MapE */
		/* For tunnels the Ethernet header sits just before
		 * skb->data rather than at the mac_header offset.
		 */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}
933
/* Resolve the IPv4 next-hop neighbour for @skb and rewrite the frame's
 * source/destination MACs for egress via @out; PPPoE paths take their
 * MACs from @hw_path.  Returns 0 on success, (unsigned)-1 when no
 * (valid) neighbour exists.
 */
static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	u32 nexthop;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = (__force struct net_device *)out;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	/* noref neighbour lookup — only valid under RCU-BH. */
	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
			   &ip_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get all zero ethernet address ? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	return 0;
}
973
974static u16 ppe_get_chkbase(struct iphdr *iph)
975{
976 u16 org_chksum = ntohs(iph->check);
977 u16 org_tot_len = ntohs(iph->tot_len);
978 u16 org_id = ntohs(iph->id);
979 u16 chksum_tmp, tot_len_tmp, id_tmp;
980 u32 tmp = 0;
981 u16 chksum_base = 0;
982
983 chksum_tmp = ~(org_chksum);
984 tot_len_tmp = ~(org_tot_len);
985 id_tmp = ~(org_id);
986 tmp = chksum_tmp + tot_len_tmp + id_tmp;
987 tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
988 tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
989 chksum_base = tmp & 0xFFFF;
990
991 return chksum_base;
992}
993
/* Copy the Ethernet source/destination MACs and the PPPoE session id
 * from @eth / @hw_path into the FOE entry, using the field layout that
 * matches the entry's packet type.  Works on a by-value copy of
 * @entry and returns it.  The PPE stores each MAC split into a
 * byte-swapped 4-byte "hi" and 2-byte "lo" word, hence swab32/swab16.
 */
struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
				  struct flow_offload_hw_path *hw_path)
{
	switch (entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		/* All IPv6/tunnel types share the ipv6_5t_route layout. */
		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv6_5t_route.smac_lo =
			swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
		break;
	}
	return entry;
}
1021
/* Populate the FOE entry's first info block: PPPoE/VLAN insertion
 * flags, TTL decrement, cache/keep-alive enables, the hardware
 * timestamp, and the multicast + port-aggregation fields that depend
 * on the entry's packet type.  Works on a by-value copy of @entry and
 * returns it.
 */
struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
				   struct flow_offload_hw_path *hw_path)
{
	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
	entry.bfib1.ttl = 1;
	entry.bfib1.cah = 1;
	entry.bfib1.ka = 1;
	/* Timestamp width differs per HW generation: 8 bits on V4,
	 * 15 bits otherwise; read from the FE timer register (0x10).
	 */
	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4) ?
		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);

	switch (entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv4_hnapt.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv4_hnapt.iblk2.mcast = 0;
		}

		/* Port-aggregation group mask width differs per HW gen. */
		entry.ipv4_hnapt.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4) ? 0xf : 0x3f;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		if (hnat_priv->data->mcast &&
		    is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv6_5t_route.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv6_5t_route.iblk2.mcast = 0;
		}

		entry.ipv6_5t_route.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4) ? 0xf : 0x3f;
		break;
	}
	return entry;
}
1074
1075static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
1076{
1077 entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
1078 entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
1079 entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
1080}
1081
/* Build a complete FOE (flow offload entry) from an skb that reached the
 * post-routing hook in HIT_UNBIND_RATE_REACH state, then commit it to the
 * PPE table slot @foe.
 *
 * @skb:     packet that triggered learning (hnat metadata in headroom)
 * @dev:     egress net_device (decides GMAC/PDMA destination port)
 * @foe:     PPE table slot pre-filled by hardware with the unbound tuple
 * @hw_path: offload path attributes (VLAN id, PPPoE session, flags)
 *
 * Returns 0 on success or when the packet is deliberately not bound;
 * returns -1 (wrapped to unsigned — callers ignore the value) on
 * unsupported protocol or truncated headers.  On success the entry is
 * written to @foe with state BIND (unless WHNAT finalizes it later in
 * mtk_sw_nat_hook_tx) and skb is marked HNAT_INFO_FILLED.
 */
static unsigned int skb_to_hnat_info(struct sk_buff *skb,
				     const struct net_device *dev,
				     struct foe_entry *foe,
				     struct flow_offload_hw_path *hw_path)
{
	struct foe_entry entry = { 0 };
	int whnat = IS_WHNAT(dev);
	struct ethhdr *eth;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	u32 gmac = NR_DISCARD;
	int udp = 0;
	u32 qid = 0;
	u32 port_id = 0;
	int mape = 0;

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
		/* point to ethernet header for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	else
		eth = eth_hdr(skb);

	/*do not bind multicast if PPE mcast not enable*/
	if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest))
		return 0;

	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
	entry.bfib1.sp = foe->udib1.sp;
#endif

	/* Per-protocol tuple setup: copy the hardware-learned tuple from
	 * @foe and the NAT-translated addresses/ports from the packet.
	 */
	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);
		switch (iph->protocol) {
		case IPPROTO_UDP:
			udp = 1;
			/* fallthrough */
		case IPPROTO_TCP:
			entry.ipv4_hnapt.etype = htons(ETH_P_IP);

			/* DS-Lite WAN->LAN */
			if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
			    entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
				if (entry.bfib1.pkt_type == IPV4_MAP_E) {
					pptr = skb_header_pointer(skb,
								  iph->ihl * 4,
								  sizeof(_ports),
								  &_ports);
					if (unlikely(!pptr))
						return -1;

					entry.ipv4_dslite.new_sip =
						ntohl(iph->saddr);
					entry.ipv4_dslite.new_dip =
						ntohl(iph->daddr);
					entry.ipv4_dslite.new_sport =
						ntohs(pptr->src);
					entry.ipv4_dslite.new_dport =
						ntohs(pptr->dst);
				}
#endif

				entry.ipv4_dslite.tunnel_sipv6_0 =
					foe->ipv4_dslite.tunnel_sipv6_0;
				entry.ipv4_dslite.tunnel_sipv6_1 =
					foe->ipv4_dslite.tunnel_sipv6_1;
				entry.ipv4_dslite.tunnel_sipv6_2 =
					foe->ipv4_dslite.tunnel_sipv6_2;
				entry.ipv4_dslite.tunnel_sipv6_3 =
					foe->ipv4_dslite.tunnel_sipv6_3;

				entry.ipv4_dslite.tunnel_dipv6_0 =
					foe->ipv4_dslite.tunnel_dipv6_0;
				entry.ipv4_dslite.tunnel_dipv6_1 =
					foe->ipv4_dslite.tunnel_dipv6_1;
				entry.ipv4_dslite.tunnel_dipv6_2 =
					foe->ipv4_dslite.tunnel_dipv6_2;
				entry.ipv4_dslite.tunnel_dipv6_3 =
					foe->ipv4_dslite.tunnel_dipv6_3;

				entry.ipv4_dslite.bfib1.rmt = 1;
				entry.ipv4_dslite.iblk2.dscp = iph->tos;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;

			} else {
				/* Plain IPv4 HNAPT/HNAT */
				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
					entry.bfib1.vlan_layer += 1;

					if (entry.ipv4_hnapt.vlan1)
						entry.ipv4_hnapt.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
					else
						entry.ipv4_hnapt.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
				}

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
			}

			entry.ipv4_hnapt.bfib1.udp = udp;
			if (IS_IPV4_HNAPT(foe)) {
				pptr = skb_header_pointer(skb, iph->ihl * 4,
							  sizeof(_ports),
							  &_ports);
				if (unlikely(!pptr))
					return -1;

				entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
			}

			break;

		default:
			return -1;
		}
		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, iph, skb->len,
			skb->data_len);
		break;

	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);
		switch (ip6h->nexthdr) {
		case NEXTHDR_UDP:
			udp = 1;
			/* fallthrough */
		case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
			entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);

			entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;

			if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
				entry.bfib1.vlan_layer += 1;

				if (entry.ipv6_5t_route.vlan1)
					entry.ipv6_5t_route.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
				else
					entry.ipv6_5t_route.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
			}

			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_5t_route.iblk2.mibf = 1;
			entry.ipv6_5t_route.bfib1.udp = udp;

			if (IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.bfib1.rmt = 1;
				entry.ipv6_6rd.tunnel_sipv4 =
					foe->ipv6_6rd.tunnel_sipv4;
				entry.ipv6_6rd.tunnel_dipv4 =
					foe->ipv6_6rd.tunnel_dipv4;
			}

			entry.ipv6_3t_route.ipv6_sip0 =
				foe->ipv6_3t_route.ipv6_sip0;
			entry.ipv6_3t_route.ipv6_sip1 =
				foe->ipv6_3t_route.ipv6_sip1;
			entry.ipv6_3t_route.ipv6_sip2 =
				foe->ipv6_3t_route.ipv6_sip2;
			entry.ipv6_3t_route.ipv6_sip3 =
				foe->ipv6_3t_route.ipv6_sip3;

			entry.ipv6_3t_route.ipv6_dip0 =
				foe->ipv6_3t_route.ipv6_dip0;
			entry.ipv6_3t_route.ipv6_dip1 =
				foe->ipv6_3t_route.ipv6_dip1;
			entry.ipv6_3t_route.ipv6_dip2 =
				foe->ipv6_3t_route.ipv6_dip2;
			entry.ipv6_3t_route.ipv6_dip3 =
				foe->ipv6_3t_route.ipv6_dip3;

			if (IS_IPV6_3T_ROUTE(foe)) {
				entry.ipv6_3t_route.prot =
					foe->ipv6_3t_route.prot;
				entry.ipv6_3t_route.hph =
					foe->ipv6_3t_route.hph;
			}

			if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.sport =
					foe->ipv6_5t_route.sport;
				entry.ipv6_5t_route.dport =
					foe->ipv6_5t_route.dport;
			}
			/* Rebuild the 6-bit DSCP from the IPv6 traffic class
			 * split across priority and flow_lbl[0].
			 */
			entry.ipv6_5t_route.iblk2.dscp =
				(ip6h->priority << 4 |
				 (ip6h->flow_lbl[0] >> 4));
			break;

		case NEXTHDR_IPIP:
			if ((!mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_DSLITE) ||
			    (mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_MAP_E)) {
				/* DS-Lite LAN->WAN */
				entry.ipv4_dslite.bfib1.udp =
					foe->ipv4_dslite.bfib1.udp;
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

				entry.ipv4_dslite.tunnel_sipv6_0 =
					ntohl(ip6h->saddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_sipv6_1 =
					ntohl(ip6h->saddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_sipv6_2 =
					ntohl(ip6h->saddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_sipv6_3 =
					ntohl(ip6h->saddr.s6_addr32[3]);

				entry.ipv4_dslite.tunnel_dipv6_0 =
					ntohl(ip6h->daddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_dipv6_1 =
					ntohl(ip6h->daddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_dipv6_2 =
					ntohl(ip6h->daddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_dipv6_3 =
					ntohl(ip6h->daddr.s6_addr32[3]);

				ppe_fill_flow_lbl(&entry, ip6h);

				entry.ipv4_dslite.priority = ip6h->priority;
				entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;
			} else if (mape_toggle &&
				   entry.bfib1.pkt_type == IPV4_HNAPT) {
				/* MapE LAN -> WAN */
				mape = 1;
				entry.ipv4_hnapt.iblk2.dscp =
					foe->ipv4_hnapt.iblk2.dscp;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				if (IS_GMAC1_MODE)
					entry.ipv4_hnapt.vlan1 = 1;
				else
					entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip =
					foe->ipv4_hnapt.new_sip;
				entry.ipv4_hnapt.new_dip =
					foe->ipv4_hnapt.new_dip;
				entry.ipv4_hnapt.etype = htons(ETH_P_IP);

				/* QoS queue id comes from skb->mark; field is
				 * 7 bits on HNAT v4, 4 bits on older versions.
				 */
				if (qos_toggle) {
					entry.ipv4_hnapt.iblk2.qid =
						(hnat_priv->data->version == MTK_HNAT_V4) ?
						 skb->mark & 0x7f : skb->mark & 0xf;
					entry.ipv4_hnapt.iblk2.fqos = 1;
				}

				entry.ipv4_hnapt.bfib1.udp =
					foe->ipv4_hnapt.bfib1.udp;

				entry.ipv4_hnapt.new_sport =
					foe->ipv4_hnapt.new_sport;
				entry.ipv4_hnapt.new_dport =
					foe->ipv4_hnapt.new_dport;
				/* Keep the outer IPv6 header for later MAP-E
				 * LAN->WAN re-encapsulation.
				 */
				mape_l2w_v6h = *ip6h;
			}
			break;

		default:
			return -1;
		}

		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, ip6h, skb->len,
			skb->data_len);
		break;

	default:
		/* Non-IP ethertype: only 6RD LAN->WAN (IPv6-in-IPv4) is
		 * learnable here; the outer header is IPv4.
		 */
		iph = ip_hdr(skb);
		switch (entry.bfib1.pkt_type) {
		case IPV6_6RD: /* 6RD LAN->WAN */
			entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
			entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
			entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
			entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;

			entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
			entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
			entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
			entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;

			entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
			entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
			entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
			entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
			entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
			entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
			entry.ipv6_6rd.ttl = iph->ttl;
			entry.ipv6_6rd.dscp = iph->tos;
			entry.ipv6_6rd.per_flow_6rd_id = 1;
			entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_6rd.iblk2.mibf = 1;
			break;

		default:
			return -1;
		}
	}

	/* Fill Layer2 Info.*/
	entry = ppe_fill_L2_info(eth, entry, hw_path);

	/* Fill Info Blk*/
	entry = ppe_fill_info_blk(eth, entry, hw_path);

	/* Select the destination port (GMAC1/GMAC2/PDMA) from the egress
	 * device class, and handle DSA tagging for switch ports.
	 */
	if (IS_LAN(dev)) {
		if (IS_DSA_LAN(dev))
			port_id = hnat_dsa_fill_stag(dev, &entry, hw_path,
						     ntohs(eth->h_proto),
						     mape);

		if (IS_BOND_MODE)
			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
				 NR_GMAC2_PORT : NR_GMAC1_PORT;
		else
			gmac = NR_GMAC1_PORT;
	} else if (IS_WAN(dev)) {
		if (IS_DSA_WAN(dev))
			port_id = hnat_dsa_fill_stag(dev,&entry, hw_path,
						     ntohs(eth->h_proto),
						     mape);
		if (mape_toggle && mape == 1) {
			gmac = NR_PDMA_PORT;
			/* Set act_dp = wan_dev */
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		} else {
			gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
		}
	} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN(skb) ||
		   FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
		if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
			entry.bfib1.vpm = 1;
			entry.bfib1.vlan_layer = 1;

			if (FROM_GE_LAN(skb))
				entry.ipv4_hnapt.vlan1 = 1;
			else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
				entry.ipv4_hnapt.vlan1 = 2;
		}

		trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
			     skb_hnat_iface(skb), dev->name);
		/* To CPU then stolen by pre-routing hant hook of LAN/WAN
		 * Current setting is PDMA RX.
		 */
		gmac = NR_PDMA_PORT;
		if (IS_IPV4_GRP(foe))
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		else
			entry.ipv6_5t_route.act_dp = dev->ifindex;
	} else {
		printk_ratelimited(KERN_WARNING
					"Unknown case of dp, iif=%x --> %s\n",
					skb_hnat_iface(skb), dev->name);

		return 0;
	}

	/* Pick the QDMA queue id: skb->mark in HQoS mode, per-port in
	 * PPPQ mode, otherwise queue 0.
	 */
	if (IS_HQOS_MODE)
		qid = skb->mark & (MTK_QDMA_TX_MASK);
	else if (IS_PPPQ_MODE)
		qid = port_id & MTK_QDMA_TX_MASK;
	else
		qid = 0;

	/* Program destination port, accounting group and QoS fields into
	 * the appropriate union view (IPv4 vs. IPv6).
	 */
	if (IS_IPV4_GRP(foe)) {
		entry.ipv4_hnapt.iblk2.dp = gmac;
		entry.ipv4_hnapt.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;

		if (qos_toggle) {
			if (hnat_priv->data->version == MTK_HNAT_V4) {
				entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
			} else {
				/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
				entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
				if (hnat_priv->data->version != MTK_HNAT_V1)
					entry.ipv4_hnapt.iblk2.port_mg |=
						((qid >> 4) & 0x3);

				if (((IS_EXT(dev) && (FROM_GE_LAN(skb) ||
				      FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
				     ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
				    (!whnat)) {
					entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
					entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
					entry.bfib1.vlan_layer = 1;
				}
			}

			if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT)
				entry.ipv4_hnapt.iblk2.fqos = 0;
			else
				entry.ipv4_hnapt.iblk2.fqos = 1;
		} else {
			entry.ipv4_hnapt.iblk2.fqos = 0;
		}
	} else {
		entry.ipv6_5t_route.iblk2.dp = gmac;
		entry.ipv6_5t_route.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;

		if (qos_toggle) {
			if (hnat_priv->data->version == MTK_HNAT_V4) {
				entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
			} else {
				/* qid[5:0]= port_mg[1:0]+ qid[3:0] */
				entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
				if (hnat_priv->data->version != MTK_HNAT_V1)
					entry.ipv6_5t_route.iblk2.port_mg |=
						((qid >> 4) & 0x3);

				if (IS_EXT(dev) && (FROM_GE_LAN(skb) ||
				    FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
				    (!whnat)) {
					entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
					entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
					entry.bfib1.vlan_layer = 1;
				}
			}

			if (FROM_EXT(skb))
				entry.ipv6_5t_route.iblk2.fqos = 0;
			else
				entry.ipv6_5t_route.iblk2.fqos = 1;
		} else {
			entry.ipv6_5t_route.iblk2.fqos = 0;
		}
	}

	/* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined
	 * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and
	 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
	 */
	if (!whnat)
		entry.bfib1.state = BIND;

	memcpy(foe, &entry, sizeof(entry));
	/*reset statistic for this entry*/
	if (hnat_priv->data->per_flow_accounting)
		memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
		       0, sizeof(struct mib_entry));

	/* Ensure the table write is visible before flagging the skb. */
	wmb();
	skb_hnat_filled(skb) = HNAT_INFO_FILLED;

	return 0;
}
1570
/* WHNAT TX hook: finalize and bind a FOE entry for a packet that is being
 * transmitted toward a Wi-Fi WDMA port.  Called after skb_to_hnat_info()
 * has pre-filled the entry (HNAT_INFO_FILLED) but left it unbound.
 *
 * @skb:     packet carrying hnat metadata in its headroom
 * @gmac_no: destination port; must be one of the WDMA/WHNAT ports
 *
 * Always returns NF_ACCEPT; when all sanity checks pass, the matching PPE
 * entry gets its Wi-Fi info (bssid/wcid/rxid), VLAN and QoS fields updated
 * and its state set to BIND.
 */
int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
{
	struct foe_entry *entry;
	struct ethhdr *eth;

	if (skb_hnat_alg(skb) || !is_hnat_info_filled(skb) ||
	    !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
		return NF_ACCEPT;

	trace_printk(
		"[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
		__func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
		skb_hnat_wdma_id(skb), skb_hnat_bss_id(skb),
		skb_hnat_wc_id(skb), skb_hnat_rx_id(skb));

	/* Only Wi-Fi WDMA destinations are handled here. */
	if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) &&
	    (gmac_no != NR_WHNAT_WDMA_PORT))
		return NF_ACCEPT;

	if (!skb_hnat_is_hashed(skb))
		return NF_ACCEPT;

	/* Bounds-check the hardware-provided PPE/entry indices. */
	if (skb_hnat_entry(skb) >= hnat_priv->foe_etry_num ||
	    skb_hnat_ppe(skb) >= CFG_PPE_NUM)
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (entry_hnat_is_bound(entry))
		return NF_ACCEPT;

	if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
		return NF_ACCEPT;

	eth = eth_hdr(skb);

	/*not bind multicast if PPE mcast not enable*/
	if (!hnat_priv->data->mcast) {
		if (is_multicast_ether_addr(eth->h_dest))
			return NF_ACCEPT;

		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.iblk2.mcast = 0;
		else
			entry->ipv6_5t_route.iblk2.mcast = 0;
	}

	/* Some mt_wifi virtual interfaces, such as apcli,
	 * will change the smac for specail purpose.
	 */
	switch (entry->bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		break;
	}

	/* Mirror the skb's VLAN tag into the entry (stacked on an existing
	 * vlan1 if present), or clear VLAN insertion when untagged.
	 */
	if (skb->vlan_tci) {
		entry->bfib1.vlan_layer += 1;
		entry->bfib1.vpm = 1;
		if (IS_IPV4_GRP(entry)) {
			entry->ipv4_hnapt.etype = htons(ETH_P_8021Q);
			if(entry->ipv4_hnapt.vlan1)
				entry->ipv4_hnapt.vlan2 = skb->vlan_tci;
			else
				entry->ipv4_hnapt.vlan1 = skb->vlan_tci;
		} else if (IS_IPV6_GRP(entry)) {
			entry->ipv6_5t_route.etype = htons(ETH_P_8021Q);
			if(entry->ipv6_5t_route.vlan1)
				entry->ipv6_5t_route.vlan2 = skb->vlan_tci;
			else
				entry->ipv6_5t_route.vlan1 = skb->vlan_tci;
		}
	} else {
		entry->bfib1.vpm = 0;
		entry->bfib1.vlan_layer = 0;
	}

	/* MT7622 wifi hw_nat not support QoS */
	if (IS_IPV4_GRP(entry)) {
		entry->ipv4_hnapt.iblk2.fqos = 0;
		/* HNAT v2 uses a single WHNAT WDMA port, v4 has two WDMA
		 * ports; both carry Wi-Fi info in the entry.
		 */
		if ((hnat_priv->data->version == MTK_HNAT_V2 &&
		     gmac_no == NR_WHNAT_WDMA_PORT) ||
		    (hnat_priv->data->version == MTK_HNAT_V4 &&
		     (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
			entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
			entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
			entry->ipv4_hnapt.iblk2.fqos = (qos_toggle) ? 1 : 0;
			entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv4_hnapt.iblk2.winfoi = 1;
#else
			entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
			entry->ipv4_hnapt.iblk2w.winfoi = 1;
			entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
		} else {
			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
				entry->bfib1.vpm = 1;
				entry->bfib1.vlan_layer = 1;

				if (FROM_GE_LAN(skb))
					entry->ipv4_hnapt.vlan1 = 1;
				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
					entry->ipv4_hnapt.vlan1 = 2;
			}

			if (qos_toggle &&
			    (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
				entry->bfib1.vpm = 0;
				entry->bfib1.vlan_layer = 1;
				entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
				entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
				entry->ipv4_hnapt.iblk2.fqos = 1;
			}
		}
		entry->ipv4_hnapt.iblk2.dp = gmac_no;
	} else {
		entry->ipv6_5t_route.iblk2.fqos = 0;
		if ((hnat_priv->data->version == MTK_HNAT_V2 &&
		     gmac_no == NR_WHNAT_WDMA_PORT) ||
		    (hnat_priv->data->version == MTK_HNAT_V4 &&
		     (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
			entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
			entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
			entry->ipv6_5t_route.iblk2.fqos = (qos_toggle) ? 1 : 0;
			entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv6_5t_route.iblk2.winfoi = 1;
#else
			entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
			entry->ipv6_5t_route.iblk2w.winfoi = 1;
			entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
		} else {
			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
				entry->bfib1.vpm = 1;
				entry->bfib1.vlan_layer = 1;

				if (FROM_GE_LAN(skb))
					entry->ipv6_5t_route.vlan1 = 1;
				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
					entry->ipv6_5t_route.vlan1 = 2;
			}

			if (qos_toggle &&
			    (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
				entry->bfib1.vpm = 0;
				entry->bfib1.vlan_layer = 1;
				entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
				entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
				entry->ipv6_5t_route.iblk2.fqos = 1;
			}
		}
		entry->ipv6_5t_route.iblk2.dp = gmac_no;
	}

	entry->bfib1.state = BIND;

	return NF_ACCEPT;
}
1740
1741int mtk_sw_nat_hook_rx(struct sk_buff *skb)
1742{
developer99506e52021-06-30 22:03:02 +08001743 if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
1744 skb_hnat_magic_tag(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001745 return NF_ACCEPT;
developer99506e52021-06-30 22:03:02 +08001746 }
developerfd40db22021-04-29 10:08:25 +08001747
1748 skb_hnat_alg(skb) = 0;
developerfdfe1572021-09-13 16:56:33 +08001749 skb_hnat_filled(skb) = 0;
developerfd40db22021-04-29 10:08:25 +08001750 skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
1751
1752 if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
1753 skb_hnat_sport(skb) = NR_WDMA0_PORT;
1754 else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
1755 skb_hnat_sport(skb) = NR_WDMA1_PORT;
1756
1757 return NF_ACCEPT;
1758}
1759
1760void mtk_ppe_dev_register_hook(struct net_device *dev)
1761{
1762 int i, number = 0;
1763 struct extdev_entry *ext_entry;
1764
developerfd40db22021-04-29 10:08:25 +08001765 for (i = 1; i < MAX_IF_NUM; i++) {
1766 if (hnat_priv->wifi_hook_if[i] == dev) {
1767 pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
1768 __func__, dev->name, i);
1769 return;
1770 }
1771 if (!hnat_priv->wifi_hook_if[i]) {
1772 if (find_extif_from_devname(dev->name)) {
1773 extif_set_dev(dev);
1774 goto add_wifi_hook_if;
1775 }
1776
1777 number = get_ext_device_number();
1778 if (number >= MAX_EXT_DEVS) {
1779 pr_info("%s : extdev array is full. %s is not registered\n",
1780 __func__, dev->name);
1781 return;
1782 }
1783
1784 ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
1785 if (!ext_entry)
1786 return;
1787
developer4c32b7a2021-11-13 16:46:43 +08001788 strncpy(ext_entry->name, dev->name, IFNAMSIZ - 1);
developerfd40db22021-04-29 10:08:25 +08001789 dev_hold(dev);
1790 ext_entry->dev = dev;
1791 ext_if_add(ext_entry);
1792
1793add_wifi_hook_if:
1794 dev_hold(dev);
1795 hnat_priv->wifi_hook_if[i] = dev;
1796
1797 break;
1798 }
1799 }
1800 pr_info("%s : ineterface %s register (%d)\n", __func__, dev->name, i);
1801}
1802
1803void mtk_ppe_dev_unregister_hook(struct net_device *dev)
1804{
1805 int i;
1806
1807 for (i = 1; i < MAX_IF_NUM; i++) {
1808 if (hnat_priv->wifi_hook_if[i] == dev) {
1809 hnat_priv->wifi_hook_if[i] = NULL;
1810 dev_put(dev);
1811
1812 break;
1813 }
1814 }
1815
1816 extif_put_dev(dev);
1817 pr_info("%s : ineterface %s set null (%d)\n", __func__, dev->name, i);
1818}
1819
1820static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
1821{
1822 struct dst_entry *dst;
1823 struct nf_conn *ct;
1824 enum ip_conntrack_info ctinfo;
1825 const struct nf_conn_help *help;
1826
1827 /* Do not accelerate 1st round of xfrm flow, and 2nd round of xfrm flow
1828 * is from local_out which is also filtered in sanity check.
1829 */
1830 dst = skb_dst(skb);
1831 if (dst && dst_xfrm(dst))
1832 return 0;
1833
1834 ct = nf_ct_get(skb, &ctinfo);
1835 if (!ct)
1836 return 1;
1837
1838 /* rcu_read_lock()ed by nf_hook_slow */
1839 help = nfct_help(ct);
1840 if (help && rcu_dereference(help->helper))
1841 return 0;
1842
1843 return 1;
1844}
1845
developer6f4a0c72021-10-19 10:04:22 +08001846static void mtk_hnat_dscp_update(struct sk_buff *skb, struct foe_entry *entry)
1847{
1848 struct iphdr *iph;
1849 struct ethhdr *eth;
1850 struct ipv6hdr *ip6h;
1851 bool flag = false;
1852
1853 eth = eth_hdr(skb);
1854 switch (ntohs(eth->h_proto)) {
1855 case ETH_P_IP:
1856 iph = ip_hdr(skb);
1857 if (entry->ipv4_hnapt.iblk2.dscp != iph->tos)
1858 flag = true;
1859 break;
1860 case ETH_P_IPV6:
1861 ip6h = ipv6_hdr(skb);
1862 if (entry->ipv6_5t_route.iblk2.dscp !=
1863 (ip6h->priority << 4 |
1864 (ip6h->flow_lbl[0] >> 4)))
1865 flag = true;
1866 break;
1867 default:
1868 return;
1869 }
1870
1871 if (flag) {
1872 memset(entry, 0, sizeof(struct foe_entry));
1873 hnat_cache_ebl(1);
1874 }
1875}
1876
developer30a47682021-11-02 17:06:14 +08001877static void mtk_hnat_nf_update(struct sk_buff *skb)
1878{
1879 struct nf_conn *ct;
1880 struct nf_conn_acct *acct;
1881 struct nf_conn_counter *counter;
1882 enum ip_conntrack_info ctinfo;
1883 struct hnat_accounting diff;
1884
1885 ct = nf_ct_get(skb, &ctinfo);
1886 if (ct) {
1887 if (!hnat_get_count(hnat_priv, skb_hnat_ppe(skb), skb_hnat_entry(skb), &diff))
1888 return;
1889
1890 acct = nf_conn_acct_find(ct);
1891 if (acct) {
1892 counter = acct->counter;
1893 atomic64_add(diff.packets, &counter[CTINFO2DIR(ctinfo)].packets);
1894 atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes);
1895 }
1896 }
1897}
1898
developerfd40db22021-04-29 10:08:25 +08001899static unsigned int mtk_hnat_nf_post_routing(
1900 struct sk_buff *skb, const struct net_device *out,
1901 unsigned int (*fn)(struct sk_buff *, const struct net_device *,
1902 struct flow_offload_hw_path *),
1903 const char *func)
1904{
1905 struct foe_entry *entry;
1906 struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
developere5763512021-05-21 01:04:58 +08001907 .virt_dev = (struct net_device*)out };
developerfd40db22021-04-29 10:08:25 +08001908 const struct net_device *arp_dev = out;
1909
1910 if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
1911 !IS_SPACE_AVAILABLE_HEAD(skb)))
1912 return 0;
1913
1914 if (unlikely(!skb_hnat_is_hashed(skb)))
1915 return 0;
1916
1917 if (out->netdev_ops->ndo_flow_offload_check) {
developere5763512021-05-21 01:04:58 +08001918 out->netdev_ops->ndo_flow_offload_check(&hw_path);
developerfd40db22021-04-29 10:08:25 +08001919 out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
1920 }
1921
1922 if (!IS_LAN(out) && !IS_WAN(out) && !IS_EXT(out))
1923 return 0;
1924
1925 trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
1926 skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));
1927
developer471f6562021-05-10 20:48:34 +08001928 entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
developerfd40db22021-04-29 10:08:25 +08001929
1930 switch (skb_hnat_reason(skb)) {
1931 case HIT_UNBIND_RATE_REACH:
1932 if (entry_hnat_is_bound(entry))
1933 break;
1934
1935 if (fn && !mtk_hnat_accel_type(skb))
1936 break;
1937
1938 if (fn && fn(skb, arp_dev, &hw_path))
1939 break;
1940
1941 skb_to_hnat_info(skb, out, entry, &hw_path);
1942 break;
1943 case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
developer30a47682021-11-02 17:06:14 +08001944 /* update hnat count to nf_conntrack by keepalive */
1945 if (hnat_priv->data->per_flow_accounting && hnat_priv->nf_stat_en)
1946 mtk_hnat_nf_update(skb);
1947
developerfd40db22021-04-29 10:08:25 +08001948 if (fn && !mtk_hnat_accel_type(skb))
1949 break;
1950
developer6f4a0c72021-10-19 10:04:22 +08001951 /* update dscp for qos */
1952 mtk_hnat_dscp_update(skb, entry);
1953
developerfd40db22021-04-29 10:08:25 +08001954 /* update mcast timestamp*/
1955 if (hnat_priv->data->version == MTK_HNAT_V3 &&
1956 hnat_priv->data->mcast && entry->bfib1.sta == 1)
1957 entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
1958
1959 if (entry_hnat_is_bound(entry)) {
1960 memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
1961
1962 return -1;
1963 }
1964 break;
1965 case HIT_BIND_MULTICAST_TO_CPU:
1966 case HIT_BIND_MULTICAST_TO_GMAC_CPU:
1967 /*do not forward to gdma again,if ppe already done it*/
1968 if (IS_LAN(out) || IS_WAN(out))
1969 return -1;
1970 break;
1971 }
1972
1973 return 0;
1974}
1975
/* mtk_hnat_ipv6_nf_local_out() - IPv6 LOCAL_OUT netfilter hook.
 *
 * For locally generated IPv4-in-IPv6 packets (nexthdr == NEXTHDR_IPIP,
 * i.e. MAP-E or DS-Lite) whose FOE entry is still unbound
 * (HIT_UNBIND_RATE_REACH), pre-record the inner IPv4 addresses and L4
 * ports into the FOE entry so the PPE can bind the tunnelled flow.
 * When mape_toggle is off, only the packet type is marked IPV4_DSLITE.
 *
 * Always returns NF_ACCEPT; the packet itself is not modified.
 */
static unsigned int
mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
			   const struct nf_hook_state *state)
{
	struct foe_entry *entry;
	struct ipv6hdr *ip6h;
	struct iphdr _iphdr;
	const struct iphdr *iph;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	int udp = 0;

	if (unlikely(!skb_hnat_is_hashed(skb)))
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
		ip6h = ipv6_hdr(skb);
		if (ip6h->nexthdr == NEXTHDR_IPIP) {
			/* Map-E LAN->WAN: need to record orig info before fn. */
			if (mape_toggle) {
				/* Inner IPv4 header follows the fixed-size
				 * IPv6 header; may not be in the linear area,
				 * hence skb_header_pointer() with _iphdr as
				 * on-stack bounce buffer.
				 */
				iph = skb_header_pointer(skb, IPV6_HDR_LEN,
							 sizeof(_iphdr), &_iphdr);
				if (unlikely(!iph))
					return NF_ACCEPT;

				switch (iph->protocol) {
				case IPPROTO_UDP:
					udp = 1;
					/* fallthrough - ports parsed below */
				case IPPROTO_TCP:
				break;

				default:
					/* Only TCP/UDP flows are offloadable. */
					return NF_ACCEPT;
				}

				/* src/dst ports share the same layout for
				 * TCP and UDP (struct tcpudphdr).
				 */
				pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
							  sizeof(_ports), &_ports);
				if (unlikely(!pptr))
					return NF_ACCEPT;

				entry->bfib1.udp = udp;

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
				/* NETSYS v2 has a dedicated MAP-E entry layout. */
				entry->bfib1.pkt_type = IPV4_MAP_E;
				entry->ipv4_dslite.iblk2.dscp = iph->tos;
				entry->ipv4_dslite.new_sip = ntohl(iph->saddr);
				entry->ipv4_dslite.new_dip = ntohl(iph->daddr);
				entry->ipv4_dslite.new_sport = ntohs(pptr->src);
				entry->ipv4_dslite.new_dport = ntohs(pptr->dst);
#else
				entry->ipv4_hnapt.iblk2.dscp = iph->tos;
				entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
				entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
#endif
			} else {
				entry->bfib1.pkt_type = IPV4_DSLITE;
			}
		}
	}
	return NF_ACCEPT;
}
2040
2041static unsigned int
2042mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
2043 const struct nf_hook_state *state)
2044{
2045 post_routing_print(skb, state->in, state->out, __func__);
2046
2047 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
2048 __func__))
2049 return NF_ACCEPT;
2050
2051 trace_printk(
2052 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2053 __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
2054 skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
2055 skb_hnat_alg(skb));
2056
2057 return NF_DROP;
2058}
2059
2060static unsigned int
2061mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
2062 const struct nf_hook_state *state)
2063{
2064 post_routing_print(skb, state->in, state->out, __func__);
2065
2066 if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
2067 __func__))
2068 return NF_ACCEPT;
2069
2070 trace_printk(
2071 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2072 __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
2073 skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
2074 skb_hnat_alg(skb));
2075
2076 return NF_DROP;
2077}
2078
/* mtk_pong_hqos_handler() - bridge PRE_ROUTING hook for HQoS "pong" path.
 *
 * Handles two fast-path cases between GMAC and external (e.g. WiFi)
 * devices:
 *   1) external dev -> GE (learning stage), via do_hnat_ext_to_ge2();
 *   2) GE -> external dev, via do_hnat_ge_to_ext().
 * When HQoS is enabled and the frame carries the HQOS_MAGIC_TAG
 * ethertype, the FOE hash index is recovered from the VLAN TCI first.
 *
 * Returns NF_STOLEN when a fast-path helper consumed the skb,
 * NF_ACCEPT for normal processing, NF_DROP on helper failure.
 */
static unsigned int
mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
		      const struct nf_hook_state *state)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);

	/* HQoS tag present: hash index is carried in the VLAN TCI
	 * (low 14 bits); force the bound-to-CPU reason so the GE->ext
	 * fast path below fires.
	 */
	if (qos_toggle && eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
		skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
	}

	if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
		clr_from_extge(skb);

	/* packets from external devices -> xxx ,step 2, learning stage */
	if (do_ext2ge_fast_learn(state->in, skb) && (!qos_toggle ||
	    (qos_toggle && eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG))) {
		if (!do_hnat_ext_to_ge2(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	/* packets form ge -> external device */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}
2119
2120static unsigned int
2121mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
2122 const struct nf_hook_state *state)
2123{
2124 post_routing_print(skb, state->in, state->out, __func__);
2125
2126 if (!mtk_hnat_nf_post_routing(skb, state->out, 0, __func__))
2127 return NF_ACCEPT;
2128
2129 trace_printk(
2130 "%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
2131 __func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
2132 skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
2133 skb_hnat_alg(skb));
2134
2135 return NF_DROP;
2136}
2137
/* mtk_hnat_ipv4_nf_local_out() - IPv4 LOCAL_OUT netfilter hook.
 *
 * Locally originated flows must not be bound by the PPE: set the ALG
 * bit via hnat_set_head_frags()/hnat_set_alg, and for 6RD
 * (IPPROTO_IPV6 inner protocol) also mark the FOE entry type.
 *
 * Returns NF_ACCEPT normally, NF_DROP if headroom expansion fails.
 */
static unsigned int
mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
			   const struct nf_hook_state *state)
{
	struct sk_buff *new_skb;
	struct foe_entry *entry;
	struct iphdr *iph;

	if (!skb_hnat_is_hashed(skb))
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	/* Ensure FOE_INFO_LEN bytes of headroom for the HNAT tag.
	 * NOTE(review): the reallocated skb is only assigned to the
	 * local variable; the netfilter core still holds the original
	 * (now freed) pointer after NF_ACCEPT. Looks like a potential
	 * use-after-free on this rarely-taken path - confirm against
	 * the hook invocation contract.
	 */
	if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
		new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
		if (!new_skb) {
			dev_info(hnat_priv->dev, "%s:drop\n", __func__);
			return NF_DROP;
		}
		dev_kfree_skb(skb);
		skb = new_skb;
	}

	/* Make the flow from local not be bound. */
	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_IPV6) {
		/* 6RD: IPv6 carried inside IPv4. */
		entry->udib1.pkt_type = IPV6_6RD;
		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
	} else {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
	}

	return NF_ACCEPT;
}
2172
2173static unsigned int mtk_hnat_br_nf_forward(void *priv,
2174 struct sk_buff *skb,
2175 const struct nf_hook_state *state)
2176{
developer99506e52021-06-30 22:03:02 +08002177 if ((hnat_priv->data->version == MTK_HNAT_V2) &&
2178 unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
developerfd40db22021-04-29 10:08:25 +08002179 hnat_set_head_frags(state, skb, 1, hnat_set_alg);
2180
2181 return NF_ACCEPT;
2182}
2183
/* Netfilter hook registration table. Note that whnat_adjust_nf_hooks()
 * may rewrite the bridge entries at runtime for the WiFi-HNAT case.
 */
static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
	{
		/* IPv4 fast-path / learning on PRE_ROUTING */
		.hook = mtk_hnat_ipv4_nf_pre_routing,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST + 1,
	},
	{
		/* IPv6 fast-path / learning on PRE_ROUTING */
		.hook = mtk_hnat_ipv6_nf_pre_routing,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST + 1,
	},
	{
		/* runs last so all other POST_ROUTING users see the skb first */
		.hook = mtk_hnat_ipv6_nf_post_routing,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_LAST,
	},
	{
		/* MAP-E / DS-Lite pre-fill for locally generated IPv6 */
		.hook = mtk_hnat_ipv6_nf_local_out,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv4_nf_post_routing,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_LAST,
	},
	{
		/* keep locally generated IPv4 flows unbound */
		.hook = mtk_hnat_ipv4_nf_local_out,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_br_nf_local_in,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_FIRST,
	},
	{
		.hook = mtk_hnat_br_nf_local_out,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_OUT,
		.priority = NF_BR_PRI_LAST - 1,
	},
	{
		/* replaced by mtk_hnat_br_nf_forward in whnat mode */
		.hook = mtk_pong_hqos_handler,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_FIRST + 1,
	},
};
2240
2241int hnat_register_nf_hooks(void)
2242{
2243 return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2244}
2245
2246void hnat_unregister_nf_hooks(void)
2247{
2248 nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
2249}
2250
2251int whnat_adjust_nf_hooks(void)
2252{
2253 struct nf_hook_ops *hook = mtk_hnat_nf_ops;
2254 unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);
2255
developerfd40db22021-04-29 10:08:25 +08002256 while (n-- > 0) {
2257 if (hook[n].hook == mtk_hnat_br_nf_local_in) {
2258 hook[n].hooknum = NF_BR_PRE_ROUTING;
developer2b85f652021-08-19 16:09:50 +08002259 hook[n].priority = NF_BR_PRI_FIRST + 1;
developerfd40db22021-04-29 10:08:25 +08002260 } else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
2261 hook[n].hooknum = NF_BR_POST_ROUTING;
2262 } else if (hook[n].hook == mtk_pong_hqos_handler) {
2263 hook[n].hook = mtk_hnat_br_nf_forward;
2264 hook[n].hooknum = NF_BR_FORWARD;
2265 hook[n].priority = NF_BR_PRI_LAST - 1;
2266 }
2267 }
2268
2269 return 0;
2270}
2271
developerfd40db22021-04-29 10:08:25 +08002272int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
2273 struct packet_type *pt, struct net_device *unused)
2274{
2275 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
2276
2277 skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
2278 skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
2279
2280 do_hnat_ge_to_ext(skb, __func__);
2281
2282 return 0;
2283}
developerfd40db22021-04-29 10:08:25 +08002284