/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
 */

#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>

#include <net/arp.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>

#include "nf_hnat_mtk.h"
#include "hnat.h"

#include "../mtk_eth_soc.h"

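/* Fast-path predicates (descriptive summary, inferred from the macro bodies):
 * - do_ge2ext_fast: the packet arrived on a GMAC-attached interface
 *   (LAN/WAN/PPD), already carries a valid PPE hash, and the PPE flagged it
 *   HIT_BIND_FORCE_TO_CPU, i.e. it belongs to a bound entry whose egress is
 *   an external (e.g. Wi-Fi) device.
 * - do_ext2ge_fast_learn: the packet looped back through the PPD device from
 *   PDMA/QDMA and its VLAN ID encodes the ifindex of a known external or WAN
 *   device, so it is in the learning stage of the ext->GE path.
 * - do_mape_w2l_fast: MAP-E is enabled and the packet is a WAN ingress frame
 *   that did not come from the MAP-E ping-pong path.
 */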
#define do_ge2ext_fast(dev, skb)                                              \
	((IS_LAN(dev) || IS_WAN(dev) || IS_PPD(dev)) &&                       \
	 skb_hnat_is_hashed(skb) &&                                           \
	 skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU)
#define do_ext2ge_fast_learn(dev, skb)                                        \
	(IS_PPD(dev) &&                                                       \
	 (skb_hnat_sport(skb) == NR_PDMA_PORT ||                              \
	  skb_hnat_sport(skb) == NR_QDMA_PORT) &&                             \
	 ((get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK)) ||              \
	  get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK)))
#define do_mape_w2l_fast(dev, skb)                                            \
	(mape_toggle && IS_WAN(dev) && (!is_from_mape(skb)))

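/* Cached copies of the outer IPv6 header for MAP-E flows: mape_l2w_v6h holds
 * the LAN->WAN tunnel header recorded at bind time, mape_w2l_v6h the most
 * recently seen WAN->LAN one, so the fast paths can re-attach them later.
 */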
static struct ipv6hdr mape_l2w_v6h;
static struct ipv6hdr mape_w2l_v6h;

static inline uint8_t get_wifi_hook_if_index_from_dev(const struct net_device *dev)
{
	int i;

	for (i = 1; i < MAX_IF_NUM; i++) {
		if (hnat_priv->wifi_hook_if[i] == dev)
			return i;
	}

	return 0;
}

static inline int get_ext_device_number(void)
{
	int i, number = 0;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++)
		number += 1;
	return number;
}

static inline int find_extif_from_devname(const char *name)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(name, ext_entry->name))
			return 1;
	}
	return 0;
}

static inline int get_index_from_dev(const struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (dev == ext_entry->dev)
			return ext_entry->dev->ifindex;
	}
	return 0;
}

static inline struct net_device *get_dev_from_index(int index)
{
	int i;
	struct extdev_entry *ext_entry;
	struct net_device *dev = NULL;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev && index == ext_entry->dev->ifindex) {
			dev = ext_entry->dev;
			break;
		}
	}
	return dev;
}

static inline struct net_device *get_wandev_from_index(int index)
{
	if (!hnat_priv->g_wandev)
		hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

	if (hnat_priv->g_wandev && hnat_priv->g_wandev->ifindex == index)
		return hnat_priv->g_wandev;
	return NULL;
}

static inline int extif_set_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (!strcmp(dev->name, ext_entry->name) && !ext_entry->dev) {
			dev_hold(dev);
			ext_entry->dev = dev;
			pr_info("%s(%s)\n", __func__, dev->name);

			return ext_entry->dev->ifindex;
		}
	}

	return -1;
}

static inline int extif_put_dev(struct net_device *dev)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev == dev) {
			ext_entry->dev = NULL;
			dev_put(dev);
			pr_info("%s(%s)\n", __func__, dev->name);

			return 0;
		}
	}

	return -1;
}

int ext_if_add(struct extdev_entry *ext_entry)
{
	int len = get_ext_device_number();

	hnat_priv->ext_if[len++] = ext_entry;
	return len;
}

int ext_if_del(struct extdev_entry *ext_entry)
{
	int i, j;

	for (i = 0; i < MAX_EXT_DEVS; i++) {
		if (hnat_priv->ext_if[i] == ext_entry) {
			for (j = i; hnat_priv->ext_if[j] && j < MAX_EXT_DEVS - 1; j++)
				hnat_priv->ext_if[j] = hnat_priv->ext_if[j + 1];
			hnat_priv->ext_if[j] = NULL;
			break;
		}
	}

	return i;
}

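/* Invalidate every BIND-state entry in all PPE tables when an offload-capable
 * interface changes state: force SMA to CPU-only forwarding, mark the entries
 * INVALID with a fresh timestamp, flush the HWNAT cache, and re-arm the
 * SMA-rebuild timer.
 */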
void foe_clear_all_bind_entries(struct net_device *dev)
{
	int i, hash_index;
	struct foe_entry *entry;

	if (!IS_LAN(dev) && !IS_WAN(dev) &&
	    !find_extif_from_devname(dev->name) &&
	    !dev->netdev_ops->ndo_flow_offload_check)
		return;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_ONLY_FWD_CPU);

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND) {
				entry->ipv4_hnapt.udib1.state = INVALID;
				entry->ipv4_hnapt.udib1.time_stamp =
					readl((hnat_priv->fe_base + 0x0010)) & 0xFF;
			}
		}
	}

	/* clear HWNAT cache */
	hnat_cache_ebl(1);

	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
}

static void gmac_ppe_fwd_enable(struct net_device *dev)
{
	if (IS_LAN(dev) || IS_GMAC1_MODE)
		set_gmac_ppe_fwd(0, 1);
	else if (IS_WAN(dev))
		set_gmac_ppe_fwd(1, 1);
}

int nf_hnat_netdevice_event(struct notifier_block *unused, unsigned long event,
			    void *ptr)
{
	struct net_device *dev;

	dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		gmac_ppe_fwd_enable(dev);

		extif_set_dev(dev);

		break;
	case NETDEV_GOING_DOWN:
		if (!get_wifi_hook_if_index_from_dev(dev))
			extif_put_dev(dev);

		foe_clear_all_bind_entries(dev);

		break;
	case NETDEV_UNREGISTER:
		if (IS_PPD(dev) && hnat_priv->g_ppdev) {
			hnat_priv->g_ppdev = NULL;
			dev_put(dev);
		}
		if (IS_WAN(dev) && hnat_priv->g_wandev) {
			hnat_priv->g_wandev = NULL;
			dev_put(dev);
		}

		break;
	case NETDEV_REGISTER:
		if (IS_PPD(dev) && !hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
		if (IS_WAN(dev) && !hnat_priv->g_wandev)
			hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

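/* When a neighbour's MAC changes, scan all PPE tables for bound IPv4 NAPT
 * entries whose translated destination IP matches the neighbour and whose
 * cached DMAC no longer matches; invalidate them so traffic falls back to the
 * CPU until a fresh entry is learned.
 */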
void foe_clear_entry(struct neighbour *neigh)
{
	u32 *daddr = (u32 *)neigh->primary_key;
	unsigned char h_dest[ETH_ALEN];
	struct foe_entry *entry;
	int i, hash_index;
	u32 dip;

	dip = (u32)(*daddr);

	for (i = 0; i < CFG_PPE_NUM; i++) {
		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND &&
			    entry->ipv4_hnapt.new_dip == ntohl(dip)) {
				*((u32 *)h_dest) = swab32(entry->ipv4_hnapt.dmac_hi);
				*((u16 *)&h_dest[4]) =
					swab16(entry->ipv4_hnapt.dmac_lo);
				if (memcmp(h_dest, neigh->ha, ETH_ALEN) != 0) {
					pr_info("%s: state=%d\n", __func__,
						neigh->nud_state);
					cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
						     SMA, SMA_ONLY_FWD_CPU);

					entry->ipv4_hnapt.udib1.state = INVALID;
					entry->ipv4_hnapt.udib1.time_stamp =
						readl((hnat_priv->fe_base + 0x0010)) & 0xFF;

					/* clear HWNAT cache */
					hnat_cache_ebl(1);

					mod_timer(&hnat_priv->hnat_sma_build_entry_timer,
						  jiffies + 3 * HZ);

					pr_info("Delete old entry: dip=%pI4\n", &dip);
					pr_info("Old mac= %pM\n", h_dest);
					pr_info("New mac= %pM\n", neigh->ha);
				}
			}
		}
	}
}

int nf_hnat_netevent_handler(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = NULL;
	struct neighbour *neigh = NULL;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		neigh = ptr;
		dev = neigh->dev;
		if (dev)
			foe_clear_entry(neigh);
		break;
	}

	return NOTIFY_DONE;
}

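/* Prepend a cached outer IPv6 header (MAP-E) in front of the current L3
 * payload. The resulting layout is [MAC][IPv6][original IPv4 packet]; note
 * that the first memcpy relies on the destination address being computed from
 * skb->data before skb_push() moves it.
 */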
unsigned int mape_add_ipv6_hdr(struct sk_buff *skb, struct ipv6hdr mape_ip6h)
{
	struct ethhdr *eth = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr *iph = NULL;

	if (skb_headroom(skb) < IPV6_HDR_LEN || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		return -1;
	}

	/* point to L3 */
	memcpy(skb->data - IPV6_HDR_LEN - ETH_HLEN, skb_push(skb, ETH_HLEN), ETH_HLEN);
	memcpy(skb_push(skb, IPV6_HDR_LEN - ETH_HLEN), &mape_ip6h, IPV6_HDR_LEN);

	eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	eth->h_proto = htons(ETH_P_IPV6);
	skb->protocol = htons(ETH_P_IPV6);

	iph = (struct iphdr *)(skb->data + IPV6_HDR_LEN);
	ip6h = (struct ipv6hdr *)(skb->data);
	ip6h->payload_len = iph->tot_len; /* IPv6 payload = entire inner IPv4 packet */

	skb_set_network_header(skb, 0);
	skb_set_transport_header(skb, iph->ihl * 4 + IPV6_HDR_LEN);
	return 0;
}

static void fix_skb_packet_type(struct sk_buff *skb, struct net_device *dev,
				struct ethhdr *eth)
{
	skb->pkt_type = PACKET_HOST;
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
}

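/* Learning/bound-stage path for traffic entering from an external (e.g.
 * Wi-Fi) device: tag the skb with a VLAN whose VID encodes the ingress
 * ifindex, then re-inject it through the PPD device so the PPE can see and
 * learn the flow.
 */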
unsigned int do_hnat_ext_to_ge(struct sk_buff *skb, const struct net_device *in,
			       const char *func)
{
	if (hnat_priv->g_ppdev && hnat_priv->g_ppdev->flags & IFF_UP) {
		u16 vlan_id = 0;

		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		set_to_ppe(skb);

		vlan_id = skb_vlan_tag_get_id(skb);
		if (vlan_id) {
			skb = vlan_insert_tag(skb, skb->vlan_proto, skb->vlan_tci);
			if (!skb)
				return -1;
		}

		/* set where we come from */
		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));
		trace_printk(
			"%s: vlan_prot=0x%x, vlan_tci=%x, in->name=%s, skb->dev->name=%s\n",
			__func__, ntohs(skb->vlan_proto), skb->vlan_tci,
			in->name, hnat_priv->g_ppdev->name);
		skb->dev = hnat_priv->g_ppdev;
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__, func);
		return 0;
	}

	trace_printk("%s: called from %s failed\n", __func__, func);
	return -1;
}

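/* Second (bound) stage of the ext->GE path: the VLAN VID carries the original
 * external ifindex, so strip the tag and hand the packet back to that device,
 * or, for the MAP-E WAN->LAN ping-pong, re-attach the cached IPv6 header and
 * re-inject it toward the WAN device.
 */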
unsigned int do_hnat_ext_to_ge2(struct sk_buff *skb, const char *func)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct net_device *dev;
	struct foe_entry *entry;

	trace_printk("%s: vlan_prot=0x%x, vlan_tci=%x\n", __func__,
		     ntohs(skb->vlan_proto), skb->vlan_tci);

	dev = get_dev_from_index(skb->vlan_tci & VLAN_VID_MASK);

	if (dev) {
		/* set where we go */
		skb->dev = dev;
		skb->vlan_proto = 0;
		skb->vlan_tci = 0;

		if (ntohs(eth->h_proto) == ETH_P_8021Q) {
			skb = skb_vlan_untag(skb);
			if (unlikely(!skb))
				return -1;
		}

		if (IS_BOND_MODE &&
		    (((hnat_priv->data->version == MTK_HNAT_V4) &&
		      (skb_hnat_entry(skb) != 0x7fff)) ||
		     ((hnat_priv->data->version != MTK_HNAT_V4) &&
		      (skb_hnat_entry(skb) != 0x3fff))))
			skb_set_hash(skb, skb_hnat_entry(skb) >> 1, PKT_HASH_TYPE_L4);

		set_from_extge(skb);
		fix_skb_packet_type(skb, skb->dev, eth);
		netif_rx(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		/* MapE WAN --> LAN/WLAN ping-pong. */
		dev = get_wandev_from_index(skb->vlan_tci & VLAN_VID_MASK);
		if (mape_toggle && dev) {
			if (!mape_add_ipv6_hdr(skb, mape_w2l_v6h)) {
				skb_set_mac_header(skb, -ETH_HLEN);
				skb->dev = dev;
				set_from_mape(skb);
				skb->vlan_proto = 0;
				skb->vlan_tci = 0;
				fix_skb_packet_type(skb, skb->dev, eth_hdr(skb));
				entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
				entry->bfib1.pkt_type = IPV4_HNAPT;
				netif_rx(skb);
				return 0;
			}
		}
		trace_printk("%s: called from %s failed\n", __func__, func);
		return -1;
	}
}

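/* Deliver a PPE-forwarded packet to its external egress device, looked up via
 * the act_dp index stored in the FOE entry; for MAP-E LAN->WAN the cached
 * IPv6 header is re-attached first. If the external device is gone, the bound
 * entry is invalidated so the flow reverts to the CPU path.
 */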
unsigned int do_hnat_ge_to_ext(struct sk_buff *skb, const char *func)
{
	/* set where we go */
	u8 index;
	struct foe_entry *entry;
	struct net_device *dev;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	if (IS_IPV4_GRP(entry))
		index = entry->ipv4_hnapt.act_dp;
	else
		index = entry->ipv6_5t_route.act_dp;

	skb->dev = get_dev_from_index(index);

#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
	if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb)
			return NF_ACCEPT;

		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
			return NF_ACCEPT;

		skb_pull_rcsum(skb, VLAN_HLEN);

		memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
			2 * ETH_ALEN);
	}
#endif

	if (skb->dev) {
		skb_set_network_header(skb, 0);
		skb_push(skb, ETH_HLEN);
		dev_queue_xmit(skb);
		trace_printk("%s: called from %s successfully\n", __func__,
			     func);
		return 0;
	} else {
		if (mape_toggle) {
			/* MapE: add IPv6 header for LAN/WLAN -> WAN */
			dev = get_wandev_from_index(index);
			if (dev) {
				if (!mape_add_ipv6_hdr(skb, mape_l2w_v6h)) {
					skb_set_network_header(skb, 0);
					skb_push(skb, ETH_HLEN);
					skb_set_mac_header(skb, 0);
					skb->dev = dev;
					dev_queue_xmit(skb);
					return 0;
				}
				trace_printk("%s: called from %s failed[MapE]\n", __func__,
					     func);
				return -1;
			}
		}
	}
	/* if the external device is down, invalidate the related PPE entry */
	if (entry_hnat_is_bound(entry)) {
		entry->bfib1.state = INVALID;
		if (IS_IPV4_GRP(entry))
			entry->ipv4_hnapt.act_dp = 0;
		else
			entry->ipv6_5t_route.act_dp = 0;

		/* clear HWNAT cache */
		hnat_cache_ebl(1);
	}
	trace_printk("%s: called from %s failed, index=%x\n", __func__,
		     func, index);
	return -1;
}

static void pre_routing_print(struct sk_buff *skb, const struct net_device *in,
			      const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

static void post_routing_print(struct sk_buff *skb, const struct net_device *in,
			       const struct net_device *out, const char *func)
{
	trace_printk(
		"[%s]: %s(iif=0x%x, CB2=0x%x)-->%s (ppe_hash=0x%x) sport=0x%x reason=0x%x alg=0x%x from %s\n",
		__func__, in->name, skb_hnat_iface(skb),
		HNAT_SKB_CB2(skb)->magic, out->name, skb_hnat_entry(skb),
		skb_hnat_sport(skb), skb_hnat_reason(skb), skb_hnat_alg(skb),
		func);
}

static inline void hnat_set_iif(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	if (IS_WHNAT(state->in) && FROM_WED(skb)) {
		return;
	} else if (IS_LAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_LAN;
	} else if (IS_PPD(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_PPD;
	} else if (IS_EXT(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_EXT;
	} else if (IS_WAN(state->in)) {
		skb_hnat_iface(skb) = FOE_MAGIC_GE_WAN;
	} else if (!IS_BR(state->in)) {
		if (state->in->netdev_ops->ndo_flow_offload_check) {
			skb_hnat_iface(skb) = FOE_MAGIC_GE_VIRTUAL;
		} else {
			skb_hnat_iface(skb) = FOE_INVALID;

			if (is_magic_tag_valid(skb) &&
			    IS_SPACE_AVAILABLE_HEAD(skb))
				memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);
		}
	}
}

static inline void hnat_set_alg(const struct nf_hook_state *state,
				struct sk_buff *skb, int val)
{
	skb_hnat_alg(skb) = val;
}

static inline void hnat_set_head_frags(const struct nf_hook_state *state,
				       struct sk_buff *head_skb, int val,
				       void (*fn)(const struct nf_hook_state *state,
						  struct sk_buff *skb, int val))
{
	struct sk_buff *segs = skb_shinfo(head_skb)->frag_list;

	fn(state, head_skb, val);
	while (segs) {
		fn(state, segs, val);
		segs = segs->next;
	}
}

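/* MAP-E WAN->LAN fast path: strip the outer IPv6 header from a TCP/UDP
 * IPv4-in-IPv6 packet, remember that header for the reverse direction, and
 * ping-pong the now-IPv4 frame through the PPD device so the PPE can bind it.
 */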
unsigned int do_hnat_mape_w2l_fast(struct sk_buff *skb, const struct net_device *in,
				   const char *func)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct iphdr _iphdr;
	struct iphdr *iph;
	struct ethhdr *eth;

	/* WAN -> LAN/WLAN MapE. */
	if (mape_toggle && (ip6h->nexthdr == NEXTHDR_IPIP)) {
		iph = skb_header_pointer(skb, IPV6_HDR_LEN, sizeof(_iphdr), &_iphdr);
		if (unlikely(!iph))
			return -1;

		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_TCP:
			break;
		default:
			return -1;
		}
		mape_w2l_v6h = *ip6h;

		/* Remove ipv6 header. */
		memcpy(skb->data + IPV6_HDR_LEN - ETH_HLEN,
		       skb->data - ETH_HLEN, ETH_HLEN);
		skb_pull(skb, IPV6_HDR_LEN - ETH_HLEN);
		skb_set_mac_header(skb, 0);
		skb_set_network_header(skb, ETH_HLEN);
		skb_set_transport_header(skb, ETH_HLEN + sizeof(_iphdr));

		eth = eth_hdr(skb);
		eth->h_proto = htons(ETH_P_IP);
		set_to_ppe(skb);

		skb->vlan_proto = htons(ETH_P_8021Q);
		skb->vlan_tci =
			(VLAN_CFI_MASK | (in->ifindex & VLAN_VID_MASK));

		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		skb->dev = hnat_priv->g_ppdev;
		skb->protocol = htons(ETH_P_IP);

		dev_queue_xmit(skb);

		return 0;
	}
	return -1;
}

static unsigned int is_ppe_support_type(struct sk_buff *skb)
{
	struct ethhdr *eth = NULL;
	struct iphdr *iph = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct iphdr _iphdr;

	eth = eth_hdr(skb);
	if (!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb) ||
	    is_broadcast_ether_addr(eth->h_dest))
		return 0;

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);

		/* only accelerate TCP/UDP and IPv6-in-IPv4 traffic */
		if ((iph->protocol == IPPROTO_TCP) ||
		    (iph->protocol == IPPROTO_UDP) ||
		    (iph->protocol == IPPROTO_IPV6)) {
			return 1;
		}

		break;
	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);

		if ((ip6h->nexthdr == NEXTHDR_TCP) ||
		    (ip6h->nexthdr == NEXTHDR_UDP)) {
			return 1;
		} else if (ip6h->nexthdr == NEXTHDR_IPIP) {
			iph = skb_header_pointer(skb, IPV6_HDR_LEN,
						 sizeof(_iphdr), &_iphdr);
			if (unlikely(!iph))
				return 0;

			if ((iph->protocol == IPPROTO_TCP) ||
			    (iph->protocol == IPPROTO_UDP)) {
				return 1;
			}

		}

		break;
	case ETH_P_8021Q:
		return 1;
	}

	return 0;
}

static unsigned int
mtk_hnat_ipv6_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx, step 1: learning stage & bound stage */
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets from ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	/* MapE needs to remove the IPv6 header and ping-pong. */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	if (is_from_mape(skb))
		clr_from_extge(skb);

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int
mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
			     const struct nf_hook_state *state)
{
	if (!is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	/* packets from external devices -> xxx, step 1: learning stage & bound stage */
	if (do_ext2ge_fast_try(state->in, skb)) {
		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	/* packets from ge -> external device
	 * For standalone wan interface
	 */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int
mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
	struct vlan_ethhdr *veth;

	if (hnat_priv->data->whnat) {
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);

		if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
			skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
			skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
		}
	}
#endif

	if (!HAS_HQOS_MAGIC_TAG(skb) && !is_ppe_support_type(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
		return NF_ACCEPT;
	}

	hnat_set_head_frags(state, skb, -1, hnat_set_iif);

	pre_routing_print(skb, state->in, state->out, __func__);

	if (unlikely(debug_level >= 7)) {
		hnat_cpu_reason_cnt(skb);
		if (skb_hnat_reason(skb) == dbg_cpu_reason)
			foe_dump_pkt(skb);
	}

	/* packets from external devices -> xxx, step 1: learning stage & bound stage */
	if ((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb) &&
	    !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		if (!hnat_priv->g_ppdev)
			hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);

		if (!do_hnat_ext_to_ge(skb, state->in, __func__))
			return NF_STOLEN;
		if (!skb)
			goto drop;
		return NF_ACCEPT;
	}

	if (hnat_priv->data->whnat) {
		if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
			clr_from_extge(skb);

		/* packets from external devices -> xxx, step 2: learning stage */
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
		if (do_ext2ge_fast_learn(state->in, skb) && (eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG)) {
#else
		if (do_ext2ge_fast_learn(state->in, skb)) {
#endif
			if (!do_hnat_ext_to_ge2(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}

		/* packets from ge -> external device */
		if (do_ge2ext_fast(state->in, skb)) {
			if (!do_hnat_ge_to_ext(skb, __func__))
				return NF_STOLEN;
			goto drop;
		}
	}

	/* MapE needs to remove the IPv6 header and ping-pong. (bridge mode) */
	if (do_mape_w2l_fast(state->in, skb)) {
		if (!do_hnat_mape_w2l_fast(skb, state->in, __func__))
			return NF_STOLEN;
		else
			return NF_ACCEPT;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}

static unsigned int hnat_ipv6_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	const struct in6_addr *ipv6_nexthop;
	struct neighbour *neigh = NULL;
	struct dst_entry *dst = skb_dst(skb);
	struct ethhdr *eth;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	ipv6_nexthop =
		rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n", __func__,
			   &ipv6_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get an all-zero ethernet address? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP) {
		/* copy ether type for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
		eth->h_proto = skb->protocol;
	} else {
		eth = eth_hdr(skb);
	}

	ether_addr_copy(eth->h_dest, neigh->ha);
	ether_addr_copy(eth->h_source, out->dev_addr);

	rcu_read_unlock_bh();

	return 0;
}

static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
					  const struct net_device *out,
					  struct flow_offload_hw_path *hw_path)
{
	u32 nexthop;
	struct neighbour *neigh;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = (__force struct net_device *)out;

	if (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) {
		memcpy(eth_hdr(skb)->h_source, hw_path->eth_src, ETH_ALEN);
		memcpy(eth_hdr(skb)->h_dest, hw_path->eth_dest, ETH_ALEN);
		return 0;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh)) {
		dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n", __func__,
			   &ip_hdr(skb)->daddr);
		rcu_read_unlock_bh();
		return -1;
	}

	/* why do we get an all-zero ethernet address? */
	if (!is_valid_ether_addr(neigh->ha)) {
		rcu_read_unlock_bh();
		return -1;
	}

	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);

	rcu_read_unlock_bh();

	return 0;
}

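/* Precompute the "checksum base" for the PPE's incremental IPv4 header
 * checksum updates: fold the one's-complements of the current checksum, total
 * length and ID into a 16-bit value, so the new length/ID can later be added
 * and refolded instead of recomputing the full checksum (inferred use; the
 * exact hardware semantics are not documented here).
 */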
static u16 ppe_get_chkbase(struct iphdr *iph)
{
	u16 org_chksum = ntohs(iph->check);
	u16 org_tot_len = ntohs(iph->tot_len);
	u16 org_id = ntohs(iph->id);
	u16 chksum_tmp, tot_len_tmp, id_tmp;
	u32 tmp = 0;
	u16 chksum_base = 0;

	chksum_tmp = ~(org_chksum);
	tot_len_tmp = ~(org_tot_len);
	id_tmp = ~(org_id);
	tmp = chksum_tmp + tot_len_tmp + id_tmp;
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	tmp = ((tmp >> 16) & 0x7) + (tmp & 0xFFFF);
	chksum_base = tmp & 0xFFFF;

	return chksum_base;
}

struct foe_entry ppe_fill_L2_info(struct ethhdr *eth, struct foe_entry entry,
				  struct flow_offload_hw_path *hw_path)
{
	switch (entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv4_hnapt.pppoe_id = hw_path->pppoe_sid;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		entry.ipv6_5t_route.dmac_hi = swab32(*((u32 *)eth->h_dest));
		entry.ipv6_5t_route.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
		entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv6_5t_route.smac_lo =
			swab16(*((u16 *)&eth->h_source[4]));
		entry.ipv6_5t_route.pppoe_id = hw_path->pppoe_sid;
		break;
	}
	return entry;
}

struct foe_entry ppe_fill_info_blk(struct ethhdr *eth, struct foe_entry entry,
				   struct flow_offload_hw_path *hw_path)
{
	entry.bfib1.psn = (hw_path->flags & FLOW_OFFLOAD_PATH_PPPOE) ? 1 : 0;
	entry.bfib1.vlan_layer += (hw_path->flags & FLOW_OFFLOAD_PATH_VLAN) ? 1 : 0;
	entry.bfib1.vpm = (entry.bfib1.vlan_layer) ? 1 : 0;
	entry.bfib1.ttl = 1;
	entry.bfib1.cah = 1;
	entry.bfib1.ka = 1;
	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V4) ?
		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);

	switch (entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		if (is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv4_hnapt.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv4_hnapt.iblk2.mcast = 0;
		}

		entry.ipv4_hnapt.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4) ? 0x3f : 0xf;
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		if (is_multicast_ether_addr(&eth->h_dest[0])) {
			entry.ipv6_5t_route.iblk2.mcast = 1;
			if (hnat_priv->data->version == MTK_HNAT_V3) {
				entry.bfib1.sta = 1;
				entry.ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);
			}
		} else {
			entry.ipv6_5t_route.iblk2.mcast = 0;
		}

		entry.ipv6_5t_route.iblk2.port_ag =
			(hnat_priv->data->version == MTK_HNAT_V4) ? 0x3f : 0xf;
		break;
	}
	return entry;
}

static void ppe_fill_flow_lbl(struct foe_entry *entry, struct ipv6hdr *ip6h)
{
	entry->ipv4_dslite.flow_lbl[0] = ip6h->flow_lbl[2];
	entry->ipv4_dslite.flow_lbl[1] = ip6h->flow_lbl[1];
	entry->ipv4_dslite.flow_lbl[2] = ip6h->flow_lbl[0];
}

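/* Build a complete FOE entry for an about-to-bind flow: copy the tuple that
 * the PPE learned in the unbind stage, add the translated addresses/ports and
 * the L2/info-block fields, resolve the egress GMAC (or PDMA for
 * external/Wi-Fi egress, with act_dp recording the destination ifindex), then
 * commit the entry. For WHNAT the final bind is deferred to
 * mtk_sw_nat_hook_tx().
 */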
static unsigned int skb_to_hnat_info(struct sk_buff *skb,
				     const struct net_device *dev,
				     struct foe_entry *foe,
				     struct flow_offload_hw_path *hw_path)
{
	struct foe_entry entry = { 0 };
	int whnat = IS_WHNAT(dev);
	struct ethhdr *eth;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	u32 gmac = NR_DISCARD;
	int udp = 0;
	u32 qid = 0;
	int mape = 0;

	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
		/* point to ethernet header for DS-Lite and MapE */
		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
	else
		eth = eth_hdr(skb);
	if (is_multicast_ether_addr(eth->h_dest)) {
		/* do not bind multicast if PPE mcast is not enabled */
		if (!hnat_priv->pmcast)
			return 0;
	}

	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packet type state */
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
	entry.bfib1.sp = foe->udib1.sp;
#endif

	switch (ntohs(eth->h_proto)) {
	case ETH_P_IP:
		iph = ip_hdr(skb);
		switch (iph->protocol) {
		case IPPROTO_UDP:
			udp = 1;
			/* fallthrough */
		case IPPROTO_TCP:
			entry.ipv4_hnapt.etype = htons(ETH_P_IP);

			/* DS-Lite WAN->LAN */
			if (entry.ipv4_hnapt.bfib1.pkt_type == IPV4_DSLITE ||
			    entry.ipv4_hnapt.bfib1.pkt_type == IPV4_MAP_E) {
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
				if (entry.bfib1.pkt_type == IPV4_MAP_E) {
					pptr = skb_header_pointer(skb,
								  iph->ihl * 4,
								  sizeof(_ports),
								  &_ports);
					if (unlikely(!pptr))
						return -1;

					entry.ipv4_dslite.new_sip =
						ntohl(iph->saddr);
					entry.ipv4_dslite.new_dip =
						ntohl(iph->daddr);
					entry.ipv4_dslite.new_sport =
						ntohs(pptr->src);
					entry.ipv4_dslite.new_dport =
						ntohs(pptr->dst);
				}
#endif

				entry.ipv4_dslite.tunnel_sipv6_0 =
					foe->ipv4_dslite.tunnel_sipv6_0;
				entry.ipv4_dslite.tunnel_sipv6_1 =
					foe->ipv4_dslite.tunnel_sipv6_1;
				entry.ipv4_dslite.tunnel_sipv6_2 =
					foe->ipv4_dslite.tunnel_sipv6_2;
				entry.ipv4_dslite.tunnel_sipv6_3 =
					foe->ipv4_dslite.tunnel_sipv6_3;

				entry.ipv4_dslite.tunnel_dipv6_0 =
					foe->ipv4_dslite.tunnel_dipv6_0;
				entry.ipv4_dslite.tunnel_dipv6_1 =
					foe->ipv4_dslite.tunnel_dipv6_1;
				entry.ipv4_dslite.tunnel_dipv6_2 =
					foe->ipv4_dslite.tunnel_dipv6_2;
				entry.ipv4_dslite.tunnel_dipv6_3 =
					foe->ipv4_dslite.tunnel_dipv6_3;

				entry.ipv4_dslite.bfib1.rmt = 1;
				entry.ipv4_dslite.iblk2.dscp = iph->tos;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;

			} else {
				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
					entry.bfib1.vlan_layer += 1;

					if (entry.ipv4_hnapt.vlan1)
						entry.ipv4_hnapt.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
					else
						entry.ipv4_hnapt.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
				}

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
			}

			entry.ipv4_hnapt.bfib1.udp = udp;
			if (IS_IPV4_HNAPT(foe)) {
				pptr = skb_header_pointer(skb, iph->ihl * 4,
							  sizeof(_ports),
							  &_ports);
				if (unlikely(!pptr))
					return -1;

				entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
			}

			break;

		default:
			return -1;
		}
		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ip_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, iph, skb->len,
			skb->data_len);
		break;

	case ETH_P_IPV6:
		ip6h = ipv6_hdr(skb);
		switch (ip6h->nexthdr) {
		case NEXTHDR_UDP:
			udp = 1;
			/* fallthrough */
		case NEXTHDR_TCP: /* IPv6-5T or IPv6-3T */
			entry.ipv6_5t_route.etype = htons(ETH_P_IPV6);

			entry.ipv6_5t_route.vlan1 = hw_path->vlan_id;

			if (skb->vlan_tci && FROM_GE_WAN(skb) && IS_LAN(dev)) {
				entry.bfib1.vlan_layer += 1;

				if (entry.ipv6_5t_route.vlan1)
					entry.ipv6_5t_route.vlan2 = (skb->vlan_tci & VLAN_VID_MASK);
				else
					entry.ipv6_5t_route.vlan1 = (skb->vlan_tci & VLAN_VID_MASK);
			}

			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_5t_route.iblk2.mibf = 1;
			entry.ipv6_5t_route.bfib1.udp = udp;

			if (IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.bfib1.rmt = 1;
				entry.ipv6_6rd.tunnel_sipv4 =
					foe->ipv6_6rd.tunnel_sipv4;
				entry.ipv6_6rd.tunnel_dipv4 =
					foe->ipv6_6rd.tunnel_dipv4;
			}

			entry.ipv6_3t_route.ipv6_sip0 =
				foe->ipv6_3t_route.ipv6_sip0;
			entry.ipv6_3t_route.ipv6_sip1 =
				foe->ipv6_3t_route.ipv6_sip1;
			entry.ipv6_3t_route.ipv6_sip2 =
				foe->ipv6_3t_route.ipv6_sip2;
			entry.ipv6_3t_route.ipv6_sip3 =
				foe->ipv6_3t_route.ipv6_sip3;

			entry.ipv6_3t_route.ipv6_dip0 =
				foe->ipv6_3t_route.ipv6_dip0;
			entry.ipv6_3t_route.ipv6_dip1 =
				foe->ipv6_3t_route.ipv6_dip1;
			entry.ipv6_3t_route.ipv6_dip2 =
				foe->ipv6_3t_route.ipv6_dip2;
			entry.ipv6_3t_route.ipv6_dip3 =
				foe->ipv6_3t_route.ipv6_dip3;

			if (IS_IPV6_3T_ROUTE(foe)) {
				entry.ipv6_3t_route.prot =
					foe->ipv6_3t_route.prot;
				entry.ipv6_3t_route.hph =
					foe->ipv6_3t_route.hph;
			}

			if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
				entry.ipv6_5t_route.sport =
					foe->ipv6_5t_route.sport;
				entry.ipv6_5t_route.dport =
					foe->ipv6_5t_route.dport;
			}
			entry.ipv6_5t_route.iblk2.dscp =
				(ip6h->priority << 4 |
				 (ip6h->flow_lbl[0] >> 4));
			break;

		case NEXTHDR_IPIP:
			if ((!mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_DSLITE) ||
			    (mape_toggle &&
			     entry.bfib1.pkt_type == IPV4_MAP_E)) {
				/* DS-Lite LAN->WAN */
				entry.ipv4_dslite.bfib1.udp =
					foe->ipv4_dslite.bfib1.udp;
				entry.ipv4_dslite.sip = foe->ipv4_dslite.sip;
				entry.ipv4_dslite.dip = foe->ipv4_dslite.dip;
				entry.ipv4_dslite.sport =
					foe->ipv4_dslite.sport;
				entry.ipv4_dslite.dport =
					foe->ipv4_dslite.dport;

				entry.ipv4_dslite.tunnel_sipv6_0 =
					ntohl(ip6h->saddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_sipv6_1 =
					ntohl(ip6h->saddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_sipv6_2 =
					ntohl(ip6h->saddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_sipv6_3 =
					ntohl(ip6h->saddr.s6_addr32[3]);

				entry.ipv4_dslite.tunnel_dipv6_0 =
					ntohl(ip6h->daddr.s6_addr32[0]);
				entry.ipv4_dslite.tunnel_dipv6_1 =
					ntohl(ip6h->daddr.s6_addr32[1]);
				entry.ipv4_dslite.tunnel_dipv6_2 =
					ntohl(ip6h->daddr.s6_addr32[2]);
				entry.ipv4_dslite.tunnel_dipv6_3 =
					ntohl(ip6h->daddr.s6_addr32[3]);

				ppe_fill_flow_lbl(&entry, ip6h);

				entry.ipv4_dslite.priority = ip6h->priority;
				entry.ipv4_dslite.hop_limit = ip6h->hop_limit;
				entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_dslite.iblk2.mibf = 1;
			} else if (mape_toggle &&
				   entry.bfib1.pkt_type == IPV4_HNAPT) {
				/* MapE LAN -> WAN */
				mape = 1;
				entry.ipv4_hnapt.iblk2.dscp =
					foe->ipv4_hnapt.iblk2.dscp;
				if (hnat_priv->data->per_flow_accounting)
					entry.ipv4_hnapt.iblk2.mibf = 1;

				if (IS_GMAC1_MODE)
					entry.ipv4_hnapt.vlan1 = 1;
				else
					entry.ipv4_hnapt.vlan1 = hw_path->vlan_id;

				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;

				entry.ipv4_hnapt.new_sip =
					foe->ipv4_hnapt.new_sip;
				entry.ipv4_hnapt.new_dip =
					foe->ipv4_hnapt.new_dip;
				entry.ipv4_hnapt.etype = htons(ETH_P_IP);

#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
				entry.ipv4_hnapt.iblk2.qid =
					(hnat_priv->data->version == MTK_HNAT_V4) ?
					skb->mark & 0x7f : skb->mark & 0xf;
				entry.ipv4_hnapt.iblk2.fqos = 1;
#endif

				entry.ipv4_hnapt.bfib1.udp =
					foe->ipv4_hnapt.bfib1.udp;

				entry.ipv4_hnapt.new_sport =
					foe->ipv4_hnapt.new_sport;
				entry.ipv4_hnapt.new_dport =
					foe->ipv4_hnapt.new_dport;
				mape_l2w_v6h = *ip6h;
			}
			break;

		default:
			return -1;
		}

		trace_printk(
			"[%s]skb->head=%p, skb->data=%p,ipv6_hdr=%p, skb->len=%d, skb->data_len=%d\n",
			__func__, skb->head, skb->data, ip6h, skb->len,
			skb->data_len);
		break;

	default:
		ip6h = ipv6_hdr(skb);
		iph = ip_hdr(skb);
		switch (entry.bfib1.pkt_type) {
		case IPV6_6RD: /* 6RD LAN->WAN */
			entry.ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
			entry.ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
			entry.ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
			entry.ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;

			entry.ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
			entry.ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
			entry.ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
			entry.ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;

			entry.ipv6_6rd.sport = foe->ipv6_6rd.sport;
			entry.ipv6_6rd.dport = foe->ipv6_6rd.dport;
			entry.ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
			entry.ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
			entry.ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
			entry.ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
			entry.ipv6_6rd.ttl = iph->ttl;
			entry.ipv6_6rd.dscp = iph->tos;
			entry.ipv6_6rd.per_flow_6rd_id = 1;
			entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
			if (hnat_priv->data->per_flow_accounting)
				entry.ipv6_6rd.iblk2.mibf = 1;
			break;

		default:
			return -1;
		}
	}

	/* Fill Layer 2 info. */
	entry = ppe_fill_L2_info(eth, entry, hw_path);

	/* Fill info block. */
	entry = ppe_fill_info_blk(eth, entry, hw_path);

	if (IS_LAN(dev)) {
		if (IS_DSA_LAN(dev))
			hnat_dsa_fill_stag(dev, &entry, hw_path,
					   ntohs(eth->h_proto), mape);

		if (IS_BOND_MODE)
			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
				NR_GMAC2_PORT : NR_GMAC1_PORT;
		else
			gmac = NR_GMAC1_PORT;
	} else if (IS_WAN(dev)) {
		if (IS_DSA_WAN(dev))
			hnat_dsa_fill_stag(dev, &entry, hw_path,
					   ntohs(eth->h_proto), mape);
		if (mape_toggle && mape == 1) {
			gmac = NR_PDMA_PORT;
			/* Set act_dp = wan_dev */
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		} else {
			gmac = (IS_GMAC1_MODE) ? NR_GMAC1_PORT : NR_GMAC2_PORT;
		}
	} else if (IS_EXT(dev) && (FROM_GE_PPD(skb) || FROM_GE_LAN(skb) ||
		   FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb) || FROM_WED(skb))) {
		if (!hnat_priv->data->whnat && IS_GMAC1_MODE) {
			entry.bfib1.vpm = 1;
			entry.bfib1.vlan_layer = 1;

			if (FROM_GE_LAN(skb))
				entry.ipv4_hnapt.vlan1 = 1;
			else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
				entry.ipv4_hnapt.vlan1 = 2;
		}

		trace_printk("learn of lan or wan(iif=%x) --> %s(ext)\n",
			     skb_hnat_iface(skb), dev->name);
		/* To CPU, then stolen by the pre-routing hnat hook of LAN/WAN.
		 * Current setting is PDMA RX.
		 */
		gmac = NR_PDMA_PORT;
		if (IS_IPV4_GRP(foe))
			entry.ipv4_hnapt.act_dp = dev->ifindex;
		else
			entry.ipv6_5t_route.act_dp = dev->ifindex;
	} else {
		printk_ratelimited(KERN_WARNING
				   "Unknown case of dp, iif=%x --> %s\n",
				   skb_hnat_iface(skb), dev->name);

		return 0;
	}

	qid = skb->mark & (MTK_QDMA_TX_MASK);

	if (IS_IPV4_GRP(foe)) {
		entry.ipv4_hnapt.iblk2.dp = gmac;
		entry.ipv4_hnapt.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
		if (hnat_priv->data->version == MTK_HNAT_V4) {
			entry.ipv4_hnapt.iblk2.qid = qid & 0x7f;
		} else {
			/* qid[5:0] = port_mg[1:0] + qid[3:0] */
			entry.ipv4_hnapt.iblk2.qid = qid & 0xf;
			if (hnat_priv->data->version != MTK_HNAT_V1)
				entry.ipv4_hnapt.iblk2.port_mg |=
					((qid >> 4) & 0x3);

			if (((IS_EXT(dev) && (FROM_GE_LAN(skb) ||
			      FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) ||
			     ((mape_toggle && mape == 1) && !FROM_EXT(skb))) &&
			    (!whnat)) {
				entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
				entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
				entry.bfib1.vlan_layer = 1;
			}
		}

		if (FROM_EXT(skb) || skb_hnat_sport(skb) == NR_QDMA_PORT)
			entry.ipv4_hnapt.iblk2.fqos = 0;
		else
			entry.ipv4_hnapt.iblk2.fqos = 1;
#else
		entry.ipv4_hnapt.iblk2.fqos = 0;
#endif
	} else {
		entry.ipv6_5t_route.iblk2.dp = gmac;
		entry.ipv6_5t_route.iblk2.port_mg =
			(hnat_priv->data->version == MTK_HNAT_V1) ? 0x3f : 0;
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
		if (hnat_priv->data->version == MTK_HNAT_V4) {
			entry.ipv6_5t_route.iblk2.qid = qid & 0x7f;
		} else {
			/* qid[5:0] = port_mg[1:0] + qid[3:0] */
			entry.ipv6_5t_route.iblk2.qid = qid & 0xf;
			if (hnat_priv->data->version != MTK_HNAT_V1)
				entry.ipv6_5t_route.iblk2.port_mg |=
					((qid >> 4) & 0x3);

			if (IS_EXT(dev) && (FROM_GE_LAN(skb) ||
			    FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) &&
			    (!whnat)) {
				entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
				entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
				entry.bfib1.vlan_layer = 1;
			}
		}

		if (FROM_EXT(skb))
			entry.ipv6_5t_route.iblk2.fqos = 0;
		else
			entry.ipv6_5t_route.iblk2.fqos = 1;
#else
		entry.ipv6_5t_route.iblk2.fqos = 0;
#endif
	}

	/* The INFO2.port_mg and 2nd VLAN ID fields of the PPE entry are
	 * redefined by the Wi-Fi whnat engine. These data and INFO2.dp will be
	 * updated and the entry is set to BIND state in mtk_sw_nat_hook_tx().
	 */
	if (!whnat)
		entry.bfib1.state = BIND;

	memcpy(foe, &entry, sizeof(entry));
	/* reset statistics for this entry */
	if (hnat_priv->data->per_flow_accounting)
		memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
		       0, sizeof(struct mib_entry));

	wmb();

	return 0;
}

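/* TX hook called from the Ethernet/WED transmit path for Wi-Fi-bound
 * packets: patch the learned FOE entry with the WDMA destination info
 * (bssid, wcid, rxid) or the fallback VLAN/HQoS tagging, refresh the source
 * MAC, then move the entry to BIND state.
 */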
int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
{
	struct foe_entry *entry;
	struct ethhdr *eth;

	if (skb_hnat_alg(skb) || !is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
		return NF_ACCEPT;

	trace_printk(
		"[%s]entry=%x reason=%x gmac_no=%x wdmaid=%x rxid=%x wcid=%x bssid=%x\n",
		__func__, skb_hnat_entry(skb), skb_hnat_reason(skb), gmac_no,
		skb_hnat_wdma_id(skb), skb_hnat_rx_id(skb),
		skb_hnat_wc_id(skb), skb_hnat_bss_id(skb));

	if ((gmac_no != NR_WDMA0_PORT) && (gmac_no != NR_WDMA1_PORT) &&
	    (gmac_no != NR_WHNAT_WDMA_PORT))
		return NF_ACCEPT;

	if (!skb_hnat_is_hashed(skb))
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (entry_hnat_is_bound(entry))
		return NF_ACCEPT;

	if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
		return NF_ACCEPT;

	eth = eth_hdr(skb);
	if (is_multicast_ether_addr(eth->h_dest)) {
		/* do not bind multicast if PPE mcast is not enabled */
		if (!hnat_priv->pmcast)
			return NF_ACCEPT;
	}

	/* Some mt_wifi virtual interfaces, such as apcli,
	 * will change the smac for special purposes.
	 */
	switch (entry->bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
		entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		break;
	case IPV4_DSLITE:
	case IPV4_MAP_E:
	case IPV6_6RD:
	case IPV6_5T_ROUTE:
	case IPV6_3T_ROUTE:
		entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
		entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		break;
	}

	entry->bfib1.vpm = 0;
	entry->bfib1.vlan_layer = 0;

	/* MT7622 Wi-Fi hw_nat does not support QoS */
	if (IS_IPV4_GRP(entry)) {
		entry->ipv4_hnapt.iblk2.fqos = 0;
		if ((hnat_priv->data->version == MTK_HNAT_V2 &&
		     gmac_no == NR_WHNAT_WDMA_PORT) ||
		    (hnat_priv->data->version == MTK_HNAT_V4 &&
		     (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
			entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
			entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
			entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv4_hnapt.iblk2.winfoi = 1;
#else
			entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
			entry->ipv4_hnapt.iblk2w.winfoi = 1;
			entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
		} else {
			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
				entry->bfib1.vpm = 1;
				entry->bfib1.vlan_layer = 1;

				if (FROM_GE_LAN(skb))
					entry->ipv4_hnapt.vlan1 = 1;
				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
					entry->ipv4_hnapt.vlan1 = 2;
			}

#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
			if (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) {
				entry->bfib1.vpm = 0;
				entry->bfib1.vlan_layer = 1;
				entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
				entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
				entry->ipv4_hnapt.iblk2.fqos = 1;
			}
#endif
		}
		entry->ipv4_hnapt.iblk2.dp = gmac_no;
	} else {
		entry->ipv6_5t_route.iblk2.fqos = 0;
		if ((hnat_priv->data->version == MTK_HNAT_V2 &&
		     gmac_no == NR_WHNAT_WDMA_PORT) ||
		    (hnat_priv->data->version == MTK_HNAT_V4 &&
		     (gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT))) {
			entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
			entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V2)
			entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
			entry->ipv6_5t_route.iblk2.winfoi = 1;
#else
			entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
			entry->ipv6_5t_route.iblk2w.winfoi = 1;
			entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
		} else {
			if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
				entry->bfib1.vpm = 1;
				entry->bfib1.vlan_layer = 1;

				if (FROM_GE_LAN(skb))
					entry->ipv6_5t_route.vlan1 = 1;
				else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
					entry->ipv6_5t_route.vlan1 = 2;
			}

#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
			if (FROM_GE_LAN(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb)) {
				entry->bfib1.vpm = 0;
				entry->bfib1.vlan_layer = 1;
				entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
				entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
				entry->ipv6_5t_route.iblk2.fqos = 1;
			}
#endif
		}
		entry->ipv6_5t_route.iblk2.dp = gmac_no;
	}

	entry->bfib1.state = BIND;

	return NF_ACCEPT;
}

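/* RX hook for packets received from WED: reset the ALG flag, restamp the
 * magic tag, and record the WDMA source port so later stages can tell which
 * WDMA ring the packet came from.
 */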
int mtk_sw_nat_hook_rx(struct sk_buff *skb)
{
	if (!IS_SPACE_AVAILABLE_HEAD(skb) || !FROM_WED(skb)) {
		skb_hnat_magic_tag(skb) = 0;
		return NF_ACCEPT;
	}

	skb_hnat_alg(skb) = 0;
	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;

	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
		skb_hnat_sport(skb) = NR_WDMA0_PORT;
	else if (skb_hnat_iface(skb) == FOE_MAGIC_WED1)
		skb_hnat_sport(skb) = NR_WDMA1_PORT;

	return NF_ACCEPT;
}

void mtk_ppe_dev_register_hook(struct net_device *dev)
{
	int i, number = 0;
	struct extdev_entry *ext_entry;

	if (!strncmp(dev->name, "wds", 3))
		return;

	for (i = 1; i < MAX_IF_NUM; i++) {
		if (hnat_priv->wifi_hook_if[i] == dev) {
			pr_info("%s : %s has been registered in wifi_hook_if table[%d]\n",
				__func__, dev->name, i);
			return;
		}
		if (!hnat_priv->wifi_hook_if[i]) {
			if (find_extif_from_devname(dev->name)) {
				extif_set_dev(dev);
				goto add_wifi_hook_if;
			}

			number = get_ext_device_number();
			if (number >= MAX_EXT_DEVS) {
				pr_info("%s : extdev array is full. %s is not registered\n",
					__func__, dev->name);
				return;
			}

			ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
			if (!ext_entry)
				return;

			strncpy(ext_entry->name, dev->name, IFNAMSIZ);
			dev_hold(dev);
			ext_entry->dev = dev;
			ext_if_add(ext_entry);

add_wifi_hook_if:
			dev_hold(dev);
			hnat_priv->wifi_hook_if[i] = dev;

			break;
		}
	}
	pr_info("%s : interface %s registered (%d)\n", __func__, dev->name, i);
}

void mtk_ppe_dev_unregister_hook(struct net_device *dev)
{
	int i;

	for (i = 1; i < MAX_IF_NUM; i++) {
		if (hnat_priv->wifi_hook_if[i] == dev) {
			hnat_priv->wifi_hook_if[i] = NULL;
			dev_put(dev);

			break;
		}
	}

	extif_put_dev(dev);
	pr_info("%s : interface %s set null (%d)\n", __func__, dev->name, i);
}

static unsigned int mtk_hnat_accel_type(struct sk_buff *skb)
{
	struct dst_entry *dst;
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	const struct nf_conn_help *help;

	/* Do not accelerate the 1st round of an xfrm flow; the 2nd round comes
	 * from local_out, which is also filtered in the sanity check.
	 */
	dst = skb_dst(skb);
	if (dst && dst_xfrm(dst))
		return 0;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return 1;

	/* rcu_read_lock()ed by nf_hook_slow */
	help = nfct_help(ct);
	if (help && rcu_dereference(help->helper))
		return 0;

	return 1;
}

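/* Shared post-routing worker: resolve the real egress device (including
 * ndo_flow_offload_check redirection), then act on the PPE CPU reason code:
 * fill and bind a new entry on HIT_UNBIND_RATE_REACH, refresh keepalives, or
 * suppress duplicate multicast forwarding.
 */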
static unsigned int mtk_hnat_nf_post_routing(
	struct sk_buff *skb, const struct net_device *out,
	unsigned int (*fn)(struct sk_buff *, const struct net_device *,
			   struct flow_offload_hw_path *),
	const char *func)
{
	struct foe_entry *entry;
	struct flow_offload_hw_path hw_path = { .dev = (struct net_device *)out,
						.virt_dev = (struct net_device *)out };
	const struct net_device *arp_dev = out;

	if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
					  !IS_SPACE_AVAILABLE_HEAD(skb)))
		return 0;

	if (unlikely(!skb_hnat_is_hashed(skb)))
		return 0;

	if (out->netdev_ops->ndo_flow_offload_check) {
		out->netdev_ops->ndo_flow_offload_check(&hw_path);
		out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
	}

	if (!IS_LAN(out) && !IS_WAN(out) && !IS_EXT(out))
		return 0;

	trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
		     skb_hnat_iface(skb), out->name, skb_hnat_reason(skb));

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	switch (skb_hnat_reason(skb)) {
	case HIT_UNBIND_RATE_REACH:
		if (entry_hnat_is_bound(entry))
			break;

		if (fn && !mtk_hnat_accel_type(skb))
			break;

		if (fn && fn(skb, arp_dev, &hw_path))
			break;

		skb_to_hnat_info(skb, out, entry, &hw_path);
		break;
	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
		if (fn && !mtk_hnat_accel_type(skb))
			break;

		/* update mcast timestamp */
		if (hnat_priv->data->version == MTK_HNAT_V3 &&
		    hnat_priv->data->mcast && entry->bfib1.sta == 1)
			entry->ipv4_hnapt.m_timestamp = foe_timestamp(hnat_priv);

		if (entry_hnat_is_bound(entry)) {
			memset(skb_hnat_info(skb), 0, FOE_INFO_LEN);

			return -1;
		}
		break;
	case HIT_BIND_MULTICAST_TO_CPU:
	case HIT_BIND_MULTICAST_TO_GMAC_CPU:
		/* do not forward to GDMA again if the PPE has already done it */
		if (IS_LAN(out) || IS_WAN(out))
			return -1;
		break;
	}

	return 0;
}

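/* LOCAL_OUT hook for IPv6: for locally generated IPv6-in-IPv4-style
 * (NEXTHDR_IPIP) traffic, pre-load the FOE entry for Map-E (when
 * mape_toggle is set, recording the inner IPv4 header and ports) or
 * mark the flow as DS-Lite.
 */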
static unsigned int
mtk_hnat_ipv6_nf_local_out(void *priv, struct sk_buff *skb,
			   const struct nf_hook_state *state)
{
	struct foe_entry *entry;
	struct ipv6hdr *ip6h;
	struct iphdr _iphdr;
	const struct iphdr *iph;
	struct tcpudphdr _ports;
	const struct tcpudphdr *pptr;
	int udp = 0;

	if (unlikely(!skb_hnat_is_hashed(skb)))
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
	if (skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH) {
		ip6h = ipv6_hdr(skb);
		if (ip6h->nexthdr == NEXTHDR_IPIP) {
			/* Map-E LAN->WAN: need to record orig info before fn. */
			if (mape_toggle) {
				iph = skb_header_pointer(skb, IPV6_HDR_LEN,
							 sizeof(_iphdr), &_iphdr);
				if (!iph)
					return NF_ACCEPT;

				switch (iph->protocol) {
				case IPPROTO_UDP:
					udp = 1;
					/* fall through */
				case IPPROTO_TCP:
					break;

				default:
					return NF_ACCEPT;
				}

				pptr = skb_header_pointer(skb, IPV6_HDR_LEN + iph->ihl * 4,
							  sizeof(_ports), &_ports);
				if (!pptr)
					return NF_ACCEPT;

				entry->bfib1.udp = udp;

#if defined(CONFIG_MEDIATEK_NETSYS_V2)
				entry->bfib1.pkt_type = IPV4_MAP_E;
				entry->ipv4_dslite.iblk2.dscp = iph->tos;
				entry->ipv4_dslite.new_sip = ntohl(iph->saddr);
				entry->ipv4_dslite.new_dip = ntohl(iph->daddr);
				entry->ipv4_dslite.new_sport = ntohs(pptr->src);
				entry->ipv4_dslite.new_dport = ntohs(pptr->dst);
#else
				entry->ipv4_hnapt.iblk2.dscp = iph->tos;
				entry->ipv4_hnapt.new_sip = ntohl(iph->saddr);
				entry->ipv4_hnapt.new_dip = ntohl(iph->daddr);
				entry->ipv4_hnapt.new_sport = ntohs(pptr->src);
				entry->ipv4_hnapt.new_dport = ntohs(pptr->dst);
#endif
			} else {
				entry->bfib1.pkt_type = IPV4_DSLITE;
			}
		}
	}
	return NF_ACCEPT;
}

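/* POST_ROUTING hook for IPv6: run the common post-routing worker with
 * the IPv6 next-hop resolver; drop (with a trace) on inconsistency.
 */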
static unsigned int
mtk_hnat_ipv6_nf_post_routing(void *priv, struct sk_buff *skb,
			      const struct nf_hook_state *state)
{
	post_routing_print(skb, state->in, state->out, __func__);

	if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv6_get_nexthop,
				      __func__))
		return NF_ACCEPT;

	trace_printk(
		"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
		__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
		skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
		skb_hnat_alg(skb));

	return NF_DROP;
}

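/* POST_ROUTING hook for IPv4: identical to the IPv6 variant above except
 * for the next-hop resolver passed to the common worker.
 */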
static unsigned int
mtk_hnat_ipv4_nf_post_routing(void *priv, struct sk_buff *skb,
			      const struct nf_hook_state *state)
{
	post_routing_print(skb, state->in, state->out, __func__);

	if (!mtk_hnat_nf_post_routing(skb, state->out, hnat_ipv4_get_nexthop,
				      __func__))
		return NF_ACCEPT;

	trace_printk(
		"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
		__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
		skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
		skb_hnat_alg(skb));

	return NF_DROP;
}

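/* Bridge PRE_ROUTING handler: recovers HQoS metadata from the magic VLAN
 * tag (when hardware QoS is enabled), then runs the ext<->GE fast paths:
 * the learning stage for external-device ingress, and the GE-to-external
 * forward.
 */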
static unsigned int
mtk_pong_hqos_handler(void *priv, struct sk_buff *skb,
		      const struct nf_hook_state *state)
{
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);

	if (eth_hdr(skb)->h_proto == HQOS_MAGIC_TAG) {
		skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
		skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;
	}
#endif

	if (skb_hnat_iface(skb) == FOE_MAGIC_EXT)
		clr_from_extge(skb);

	/* packets from external devices -> xxx, step 2: learning stage */
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
	if (do_ext2ge_fast_learn(state->in, skb) && (eth_hdr(skb)->h_proto != HQOS_MAGIC_TAG)) {
#else
	if (do_ext2ge_fast_learn(state->in, skb)) {
#endif
		if (!do_hnat_ext_to_ge2(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	/* packets from ge -> external device */
	if (do_ge2ext_fast(state->in, skb)) {
		if (!do_hnat_ge_to_ext(skb, __func__))
			return NF_STOLEN;
		goto drop;
	}

	return NF_ACCEPT;
drop:
	printk_ratelimited(KERN_WARNING
			   "%s:drop (in_dev=%s, iif=0x%x, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
			   __func__, state->in->name, skb_hnat_iface(skb),
			   HNAT_SKB_CB2(skb)->magic, skb_hnat_entry(skb),
			   skb_hnat_sport(skb), skb_hnat_reason(skb),
			   skb_hnat_alg(skb));

	return NF_DROP;
}

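/* Bridge LOCAL_OUT hook: run the common post-routing worker without a
 * next-hop resolver (L2 only); drop with a trace on inconsistency.
 */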
static unsigned int
mtk_hnat_br_nf_local_out(void *priv, struct sk_buff *skb,
			 const struct nf_hook_state *state)
{
	post_routing_print(skb, state->in, state->out, __func__);

	if (!mtk_hnat_nf_post_routing(skb, state->out, NULL, __func__))
		return NF_ACCEPT;

	trace_printk(
		"%s:drop (iif=0x%x, out_dev=%s, CB2=0x%x, ppe_hash=0x%x, sport=0x%x, reason=0x%x, alg=0x%x)\n",
		__func__, skb_hnat_iface(skb), state->out->name, HNAT_SKB_CB2(skb)->magic,
		skb_hnat_entry(skb), skb_hnat_sport(skb), skb_hnat_reason(skb),
		skb_hnat_alg(skb));

	return NF_DROP;
}

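/* LOCAL_OUT hook for IPv4: make sure the skb has FOE_INFO_LEN headroom,
 * then flag locally originated flows (and 6rd tunnels) so the PPE does
 * not bind them.
 */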
static unsigned int
mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
			   const struct nf_hook_state *state)
{
	struct sk_buff *new_skb;
	struct foe_entry *entry;
	struct iphdr *iph;

	if (!skb_hnat_is_hashed(skb))
		return NF_ACCEPT;

	entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];

	if (unlikely(skb_headroom(skb) < FOE_INFO_LEN)) {
		new_skb = skb_realloc_headroom(skb, FOE_INFO_LEN);
		if (!new_skb) {
			dev_info(hnat_priv->dev, "%s:drop\n", __func__);
			return NF_DROP;
		}
		dev_kfree_skb(skb);
		skb = new_skb;
	}

	/* Make the flow from local not be bound. */
	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_IPV6) {
		entry->udib1.pkt_type = IPV6_6RD;
		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
	} else {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
	}

	return NF_ACCEPT;
}

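/* Bridge FORWARD hook (installed by whnat_adjust_nf_hooks() in place of
 * mtk_pong_hqos_handler): on HNAT v2, disable acceleration for traffic
 * bridged between two external interfaces.
 */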
static unsigned int mtk_hnat_br_nf_forward(void *priv,
					   struct sk_buff *skb,
					   const struct nf_hook_state *state)
{
	if ((hnat_priv->data->version == MTK_HNAT_V2) &&
	    unlikely(IS_EXT(state->in) && IS_EXT(state->out)))
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);

	return NF_ACCEPT;
}

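/* Netfilter hook table: PRE/POST_ROUTING and LOCAL_OUT hooks for IPv4
 * and IPv6, plus bridge LOCAL_IN, LOCAL_OUT and PRE_ROUTING hooks.
 * Entries may be rewritten by whnat_adjust_nf_hooks() for the wifi-HNAT
 * configuration.
 */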
static struct nf_hook_ops mtk_hnat_nf_ops[] __read_mostly = {
	{
		.hook = mtk_hnat_ipv4_nf_pre_routing,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST + 1,
	},
	{
		.hook = mtk_hnat_ipv6_nf_pre_routing,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST + 1,
	},
	{
		.hook = mtk_hnat_ipv6_nf_post_routing,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv6_nf_local_out,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv4_nf_post_routing,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_POST_ROUTING,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_ipv4_nf_local_out,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_LAST,
	},
	{
		.hook = mtk_hnat_br_nf_local_in,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_FIRST,
	},
	{
		.hook = mtk_hnat_br_nf_local_out,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_OUT,
		.priority = NF_BR_PRI_LAST - 1,
	},
	{
		.hook = mtk_pong_hqos_handler,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_FIRST,
	},
};

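/* Install/remove the hook table above in init_net. */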
int hnat_register_nf_hooks(void)
{
	return nf_register_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
}

void hnat_unregister_nf_hooks(void)
{
	nf_unregister_net_hooks(&init_net, mtk_hnat_nf_ops, ARRAY_SIZE(mtk_hnat_nf_ops));
}

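/* For the wifi-HNAT configuration, retarget the bridge hooks in place:
 * LOCAL_IN becomes PRE_ROUTING, LOCAL_OUT becomes POST_ROUTING, and the
 * HQoS ping-pong handler is replaced by mtk_hnat_br_nf_forward() on
 * FORWARD. Since the table is modified in place, this is presumably
 * intended to run before hnat_register_nf_hooks().
 */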
int whnat_adjust_nf_hooks(void)
{
	struct nf_hook_ops *hook = mtk_hnat_nf_ops;
	unsigned int n = ARRAY_SIZE(mtk_hnat_nf_ops);

	while (n-- > 0) {
		if (hook[n].hook == mtk_hnat_br_nf_local_in) {
			hook[n].hooknum = NF_BR_PRE_ROUTING;
		} else if (hook[n].hook == mtk_hnat_br_nf_local_out) {
			hook[n].hooknum = NF_BR_POST_ROUTING;
		} else if (hook[n].hook == mtk_pong_hqos_handler) {
			hook[n].hook = mtk_hnat_br_nf_forward;
			hook[n].hooknum = NF_BR_FORWARD;
			hook[n].priority = NF_BR_PRI_LAST - 1;
		}
	}

	return 0;
}

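/* packet_type receive callback used when hardware QoS is enabled:
 * packets looped back from the switch carry the FOE entry index in the
 * VLAN TCI; restore it and push the skb out the external interface.
 */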
#if defined(CONFIG_NET_MEDIATEK_HW_QOS)
int mtk_hqos_ptype_cb(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *unused)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);

	skb_hnat_entry(skb) = ntohs(veth->h_vlan_TCI) & 0x3fff;
	skb_hnat_reason(skb) = HIT_BIND_FORCE_TO_CPU;

	do_hnat_ge_to_ext(skb, __func__);

	return 0;
}
#endif
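
/* Usage sketch (illustrative only, not part of this driver): a caller
 * would bind mtk_hqos_ptype_cb with dev_add_pack(). The names
 * "mtk_hqos_pack" and "hqos_dev" below are hypothetical, and the .type
 * value assumes HQOS_MAGIC_TAG is stored in network byte order, as its
 * direct comparison with eth_hdr(skb)->h_proto above suggests.
 *
 *	static struct packet_type mtk_hqos_pack __read_mostly = {
 *		.type = HQOS_MAGIC_TAG,
 *		.func = mtk_hqos_ptype_cb,
 *	};
 *
 *	mtk_hqos_pack.dev = hqos_dev;
 *	dev_add_pack(&mtk_hqos_pack);
 */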