[][Kernel][hnat][464xlat support on netfilter hnat]
[Description]
Add 464xlat support to the netfilter hnat driver.
In this scenario the skb traverses user space (e.g. a CLAT
daemon), so the hnat info stored in its headroom is lost and
the normal binding flow cannot be reused; the processing
therefore differs substantially. To reduce risk, the 464xlat
path is implemented separately and hooked into pre-routing
and post-routing. It can be enabled or disabled at runtime
via the xlat_toggle debugfs node.
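For example, a minimal configuration sketch (addresses are
illustrative; assumes debugfs is mounted at /sys/kernel/debug):
  echo 1 > /sys/kernel/debug/hnat/xlat_toggle
  echo prefix 64:ff9b:: > /sys/kernel/debug/hnat/xlat_cfg
  echo pfx_len 96 > /sys/kernel/debug/hnat/xlat_cfg
  echo map add 192.0.2.1 2001:db8::1 > /sys/kernel/debug/hnat/xlat_cfg
  echo show > /sys/kernel/debug/hnat/xlat_cfg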
[Release-log]
N/A
Change-Id: Ib00e0c6f3674ee287cb5b318d30a73da279596b9
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/7395696
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
index 9c40ac8..7cda69f 100644
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -862,6 +862,8 @@
if (err)
pr_info("hnat roaming work fail\n");
+ INIT_LIST_HEAD(&hnat_priv->xlat.map_list);
+
return 0;
err_out:
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
index f2d5dc8..8f5f37b 100644
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -18,6 +18,7 @@
#include <net/netevent.h>
#include <linux/mod_devicetable.h>
#include "hnat_mcast.h"
+#include "nf_hnat_mtk.h"
/*--------------------------------------------------------------------------*/
/* Register Offset*/
@@ -849,6 +850,18 @@
enum mtk_hnat_version version;
};
+struct map46 {
+ u32 ipv4;
+ struct in6_addr ipv6;
+ struct list_head list;
+};
+
+struct xlat_conf {
+ struct list_head map_list;
+ struct in6_addr prefix;
+ int prefix_len;
+};
+
struct mtk_hnat {
struct device *dev;
void __iomem *fe_base;
@@ -890,6 +903,7 @@
struct timer_list hnat_reset_timestamp_timer;
struct timer_list hnat_mcast_check_timer;
bool nf_stat_en;
+ struct xlat_conf xlat;
};
struct extdev_entry {
@@ -951,6 +965,7 @@
#define BIT_IPV6_3T_ROUTE_EN BIT(8)
#define BIT_IPV6_5T_ROUTE_EN BIT(9)
#define BIT_IPV6_6RD_EN BIT(10)
+#define BIT_IPV6_464XLAT_EN BIT(11)
#define BIT_IPV4_NAT_EN BIT(12)
#define BIT_IPV4_NAPT_EN BIT(13)
#define BIT_IPV4_DSL_EN BIT(14)
@@ -1187,6 +1202,8 @@
struct packet_type *pt, struct net_device *unused);
extern int dbg_cpu_reason;
extern int debug_level;
+extern int xlat_toggle;
+extern struct hnat_desc headroom[DEF_ETRY_NUM];
extern int qos_dl_toggle;
extern int qos_ul_toggle;
extern int hook_toggle;
@@ -1215,6 +1232,10 @@
int entry_delete_by_mac(u8 *mac);
int entry_delete(u32 ppe_id, int index);
int hnat_warm_init(void);
+u32 hnat_get_ppe_hash(struct foe_entry *entry);
+int mtk_ppe_get_xlat_v4_by_v6(struct in6_addr *ipv6, u32 *ipv4);
+int mtk_ppe_get_xlat_v6_by_v4(u32 *ipv4, struct in6_addr *ipv6,
+ struct in6_addr *prefix);
struct hnat_accounting *hnat_get_count(struct mtk_hnat *h, u32 ppe_id,
u32 index, struct hnat_accounting *diff);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
index 9c0d691..5db9291 100644
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
@@ -16,6 +16,8 @@
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/iopoll.h>
+#include <linux/inet.h>
+#include <net/ipv6.h>
#include "hnat.h"
#include "nf_hnat_mtk.h"
@@ -30,6 +32,8 @@
int qos_toggle;
int qos_dl_toggle = 1;
int qos_ul_toggle = 1;
+int xlat_toggle;
+struct hnat_desc headroom[DEF_ETRY_NUM];
unsigned int dbg_cpu_reason_cnt[MAX_CRSN_NUM];
static const char * const entry_state[] = { "INVALID", "UNBIND", "BIND", "FIN" };
@@ -394,8 +398,11 @@
pr_info("IPv4 Org IP: %pI4->%pI4\n", &saddr, &daddr);
pr_info("IPv4 New IP: %pI4->%pI4\n", &nsaddr, &ndaddr);
} else if (IS_IPV4_DSLITE(entry)) {
- pr_info("Information Block 2: %08X\n",
- entry->ipv4_dslite.info_blk2);
+ pr_info("Information Block 2: %08X (FP=%d FQOS=%d QID=%d)",
+ entry->ipv4_dslite.info_blk2,
+ entry->ipv4_dslite.iblk2.dp,
+ entry->ipv4_dslite.iblk2.fqos,
+ entry->ipv4_dslite.iblk2.qid);
pr_info("Create IPv4 Ds-Lite entry\n");
pr_info("IPv4 Ds-Lite: %pI4:%d->%pI4:%d\n", &saddr,
entry->ipv4_dslite.sport, &daddr,
@@ -463,8 +470,11 @@
entry->ipv6_5t_route.ipv6_dip3,
entry->ipv6_5t_route.dport);
} else if (IS_IPV6_6RD(entry)) {
- pr_info("Information Block 2: %08X\n",
- entry->ipv6_6rd.info_blk2);
+ pr_info("Information Block 2: %08X (FP=%d FQOS=%d QID=%d)",
+ entry->ipv6_6rd.info_blk2,
+ entry->ipv6_6rd.iblk2.dp,
+ entry->ipv6_6rd.iblk2.fqos,
+ entry->ipv6_6rd.iblk2.qid);
pr_info("Create IPv6 6RD entry\n");
pr_info("ING SIPv6->DIPv6: %08X:%08X:%08X:%08X:%d-> %08X:%08X:%08X:%08X:%d\n",
entry->ipv6_6rd.ipv6_sip0, entry->ipv6_6rd.ipv6_sip1,
@@ -2396,6 +2406,211 @@
.release = single_release,
};
+static int hnat_xlat_toggle_read(struct seq_file *m, void *private)
+{
+ pr_info("value=%d, xlat is %s now!\n",
+ xlat_toggle, (xlat_toggle) ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int hnat_xlat_toggle_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_xlat_toggle_read, file->private_data);
+}
+
+static ssize_t hnat_xlat_toggle_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ char buf[8] = {0};
+ int len = count;
+ int i;
+ u32 ppe_cfg;
+
+ if ((len > 8) || copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ if (buf[0] == '1' && !xlat_toggle) {
+ pr_info("xlat is going to be enabled !\n");
+ xlat_toggle = 1;
+ } else if (buf[0] == '0' && xlat_toggle) {
+ pr_info("xlat is going to be disabled !\n");
+ xlat_toggle = 0;
+ }
+
+ for (i = 0; i < CFG_PPE_NUM; i++) {
+ ppe_cfg = readl(hnat_priv->ppe_base[i] + PPE_FLOW_CFG);
+
+ if (xlat_toggle)
+ ppe_cfg |= BIT_IPV6_464XLAT_EN;
+ else
+ ppe_cfg &= ~BIT_IPV6_464XLAT_EN;
+
+ writel(ppe_cfg, hnat_priv->ppe_base[i] + PPE_FLOW_CFG);
+ }
+
+ return len;
+}
+
+static const struct file_operations hnat_xlat_toggle_fops = {
+ .open = hnat_xlat_toggle_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hnat_xlat_toggle_write,
+ .release = single_release,
+};
+
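+/* Linear lookups over the configured v4<->v6 address map list; the
+ * list is expected to stay small.
+ */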
+int mtk_ppe_get_xlat_v6_by_v4(u32 *ipv4, struct in6_addr *ipv6,
+ struct in6_addr *prefix)
+{
+ struct mtk_hnat *h = hnat_priv;
+ struct map46 *m = NULL;
+
+ list_for_each_entry(m, &h->xlat.map_list, list) {
+ if (m->ipv4 == *ipv4) {
+ memcpy(ipv6, &m->ipv6, sizeof(*ipv6));
+ memcpy(prefix, &h->xlat.prefix, sizeof(*ipv6));
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+int mtk_ppe_get_xlat_v4_by_v6(struct in6_addr *ipv6, u32 *ipv4)
+{
+ struct mtk_hnat *h = hnat_priv;
+ struct map46 *m = NULL;
+
+ list_for_each_entry(m, &h->xlat.map_list, list) {
+ if (ipv6_addr_equal(ipv6, &m->ipv6)) {
+ *ipv4 = m->ipv4;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static int hnat_xlat_cfg_read(struct seq_file *m, void *private)
+{
+ pr_info("\n464XLAT Config Command Usage:\n");
+ pr_info("Show HQoS usage:\n");
+ pr_info(" cat /sys/kernel/debug/hnat/xlat_cfg\n");
+ pr_info("Set ipv6 prefix :\n");
+ pr_info(" echo prefix <prefix> > /sys/kernel/debug/hnat/xlat_cfg\n");
+ pr_info("Set ipv6 prefix len :\n");
+ pr_info(" echo pfx_len <len> > /sys/kernel/debug/hnat/xlat_cfg\n");
+ pr_info("Add map :\n");
+ pr_info("echo map add <ipv4> <ipv6> > /sys/kernel/debug/hnat/xlat_cfg\n");
+ pr_info("Delete map :\n");
+ pr_info("echo map del <ipv4> <ipv6> > /sys/kernel/debug/hnat/xlat_cfg\n");
+ pr_info("Show config:\n");
+ pr_info("echo show > /sys/kernel/debug/hnat/xlat_cfg\n");
+
+ return 0;
+}
+
+static int hnat_xlat_cfg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hnat_xlat_cfg_read, file->private_data);
+}
+
+static ssize_t hnat_xlat_cfg_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ struct mtk_hnat *h = hnat_priv;
+ int len = count;
+ char buf[256] = {0}, v4_str[64] = {0}, v6_str[64] = {0};
+ struct map46 *map = NULL, *m = NULL, *next = NULL;
+ struct in6_addr ipv6;
+ u32 ipv4;
+
+ if ((len >= sizeof(buf)) || copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ if (!strncmp(buf, "prefix", 6)) {
+ if (sscanf(buf, "prefix %s\n", v6_str) != 1) {
+ pr_info("input error\n");
+ return -EINVAL;
+ }
+
+ in6_pton(v6_str, -1, (u8 *)&h->xlat.prefix, -1, NULL);
+ pr_info("set prefix = %pI6\n", &h->xlat.prefix);
+ } else if (!strncmp(buf, "pfx_len", 7)) {
+ if (sscanf(buf, "pfx_len %d", &h->xlat.prefix_len) != 1) {
+ pr_info("input error\n");
+ return -EINVAL;
+ }
+
+ pr_info("set pfx_len = %d\n", h->xlat.prefix_len);
+ } else if (!strncmp(buf, "map add", 7)) {
+ if (sscanf(buf, "map add %s %s\n", v4_str, v6_str) != 2) {
+ pr_info("input error\n");
+ return -EINVAL;
+ }
+
+ map = kmalloc(sizeof(struct map46), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ in4_pton(v4_str, -1, (u8 *)&map->ipv4, -1, NULL);
+ in6_pton(v6_str, -1, (u8 *)&map->ipv6, -1, NULL);
+ list_for_each_entry(m, &h->xlat.map_list, list) {
+ if (ipv6_addr_equal(&map->ipv6, &m->ipv6) &&
+ map->ipv4 == m->ipv4) {
+ pr_info("this map already added.\n");
+ kfree(map);
+ return -1;
+ }
+ }
+
+ list_add(&map->list, &h->xlat.map_list);
+ pr_info("add map: %pI4<=>%pI6\n", &map->ipv4, &map->ipv6);
+ } else if (!strncmp(buf, "map del", 7)) {
+ if (sscanf(buf, "map del %s %s\n", v4_str, v6_str) != 2) {
+ pr_info("input error\n");
+ return -EINVAL;
+ }
+
+ in4_pton(v4_str, -1, (u8 *)&ipv4, -1, NULL);
+ in6_pton(v6_str, -1, (u8 *)&ipv6, -1, NULL);
+
+ list_for_each_entry_safe(m, next, &h->xlat.map_list, list) {
+ if (ipv6_addr_equal(&ipv6, &m->ipv6) &&
+ ipv4 == m->ipv4) {
+ list_del(&m->list);
+ kfree(m);
+ pr_info("del map: %s<=>%s\n", v4_str, v6_str);
+ return len;
+ }
+ }
+
+ pr_info("not found map: %s<=>%s\n", v4_str, v6_str);
+ } else if (!strncmp(buf, "show", 4)) {
+ pr_info("prefix=%pI6\n", &h->xlat.prefix);
+ pr_info("prefix_len=%d\n", h->xlat.prefix_len);
+
+ list_for_each_entry(m, &h->xlat.map_list, list) {
+ pr_info("map: %pI4<=>%pI6\n", &m->ipv4, &m->ipv6);
+ }
+ } else {
+ pr_info("input error\n");
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+static const struct file_operations hnat_xlat_cfg_fops = {
+ .open = hnat_xlat_cfg_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = hnat_xlat_cfg_write,
+ .release = single_release,
+};
+
static void hnat_qos_toggle_usage(void)
{
pr_info("\nHQoS toggle Command Usage:\n");
@@ -2594,12 +2809,33 @@
.release = single_release,
};
-static u32 hnat_get_ppe_hash(u32 sip, u32 dip, u32 sport, u32 dport)
+u32 hnat_get_ppe_hash(struct foe_entry *entry)
{
- u32 hv1 = sport << 16 | dport;
- u32 hv2 = dip;
- u32 hv3 = sip;
- u32 hash;
+ u32 hv1 = 0, hv2 = 0, hv3 = 0, hash;
+
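+ /* Gather the three hash inputs from the entry's tuple; the field
+ * layout differs between the IPv4 and IPv6 FOE entry formats.
+ */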
+ switch (entry->bfib1.pkt_type) {
+ case IPV4_HNAPT:
+ case IPV4_HNAT:
+ case IPV4_DSLITE:
+ hv1 = entry->ipv4_hnapt.sport << 16 | entry->ipv4_hnapt.dport;
+ hv2 = entry->ipv4_hnapt.dip;
+ hv3 = entry->ipv4_hnapt.sip;
+ break;
+ case IPV6_3T_ROUTE:
+ case IPV6_5T_ROUTE:
+ case IPV6_6RD:
+ hv1 = entry->ipv6_5t_route.ipv6_sip3 ^
+ entry->ipv6_5t_route.ipv6_dip3;
+ hv1 ^= entry->ipv6_5t_route.sport << 16 |
+ entry->ipv6_5t_route.dport;
+ hv2 = entry->ipv6_5t_route.ipv6_sip2 ^
+ entry->ipv6_5t_route.ipv6_dip2;
+ hv2 ^= entry->ipv6_5t_route.ipv6_dip0;
+ hv3 = entry->ipv6_5t_route.ipv6_sip1 ^
+ entry->ipv6_5t_route.ipv6_dip1;
+ hv3 ^= entry->ipv6_5t_route.ipv6_sip0;
+ break;
+ }
hash = (hv1 & hv2) | ((~hv1) & hv3);
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
@@ -2755,12 +2991,8 @@
entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)smac));
entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&smac[4]));
- if (hash == -1) {
- hash = hnat_get_ppe_hash(entry.ipv4_hnapt.sip,
- entry.ipv4_hnapt.dip,
- entry.ipv4_hnapt.sport,
- entry.ipv4_hnapt.dport);
- }
+ if (hash == -1)
+ hash = hnat_get_ppe_hash(&entry);
foe = &hnat_priv->foe_table_cpu[ppe_id][hash];
while ((foe->ipv4_hnapt.bfib1.state == BIND) && (coll < 4)) {
@@ -2939,6 +3171,10 @@
&hnat_ppd_if_fops);
debugfs_create_file("static_entry", 0444, root, h,
&hnat_static_fops);
+ debugfs_create_file("xlat_toggle", 0444, root, h,
+ &hnat_xlat_toggle_fops);
+ debugfs_create_file("xlat_cfg", 0444, root, h,
+ &hnat_xlat_cfg_fops);
for (i = 0; i < hnat_priv->data->num_of_sch; i++) {
ret = snprintf(name, sizeof(name), "qdma_sch%ld", i);
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
index 16cbed3..e0d5e10 100644
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -688,7 +688,20 @@
return -1;
}
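+/* Cache the hnat descriptor from the skb headroom, indexed by FOE
+ * entry, while the flow is still unbound (HIT_UNBIND_RATE_REACH).
+ * The skb is about to traverse user space (e.g. a CLAT daemon) and
+ * loses its headroom there, so post-routing relies on this cached
+ * copy. Once the entry is bound, the cached slot is cleared.
+ */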
+void mtk_464xlat_pre_process(struct sk_buff *skb)
+{
+ struct foe_entry *foe;
+
+ foe = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
+ if (foe->bfib1.state != BIND &&
+ skb_hnat_reason(skb) == HIT_UNBIND_RATE_REACH)
+ memcpy(&headroom[skb_hnat_entry(skb)], skb->head,
+ sizeof(struct hnat_desc));
+ if (foe->bfib1.state == BIND)
+ memset(&headroom[skb_hnat_entry(skb)], 0,
+ sizeof(struct hnat_desc));
+}
static unsigned int is_ppe_support_type(struct sk_buff *skb)
{
@@ -786,6 +799,9 @@
if (is_from_mape(skb))
clr_from_extge(skb);
#endif
+ if (xlat_toggle)
+ mtk_464xlat_pre_process(skb);
+
return NF_ACCEPT;
drop:
if (skb)
@@ -831,6 +847,8 @@
return NF_STOLEN;
goto drop;
}
+ if (xlat_toggle)
+ mtk_464xlat_pre_process(skb);
return NF_ACCEPT;
drop:
@@ -2111,8 +2129,290 @@
atomic64_add(diff.bytes, &counter[CTINFO2DIR(ctinfo)].bytes);
}
}
+}
+
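+/* Resolve the next-hop neighbour on the egress device and fill the
+ * entry's destination/source MACs; l2w selects the IPv6 next hop
+ * (LAN->WAN) versus the IPv4 next hop (WAN->LAN).
+ */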
+int mtk_464xlat_fill_mac(struct foe_entry *entry, struct sk_buff *skb,
+ const struct net_device *out, bool l2w)
+{
+ const struct in6_addr *ipv6_nexthop;
+ struct dst_entry *dst = skb_dst(skb);
+ struct neighbour *neigh = NULL;
+ struct rtable *rt = (struct rtable *)dst;
+ u32 nexthop;
+
+ rcu_read_lock_bh();
+ if (l2w) {
+ ipv6_nexthop = rt6_nexthop((struct rt6_info *)dst,
+ &ipv6_hdr(skb)->daddr);
+ neigh = __ipv6_neigh_lookup_noref(dst->dev, ipv6_nexthop);
+ if (unlikely(!neigh)) {
+ dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI6)\n",
+ __func__, &ipv6_hdr(skb)->daddr);
+ rcu_read_unlock_bh();
+ return -1;
+ }
+ } else {
+ nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
+ neigh = __ipv4_neigh_lookup_noref(dst->dev, nexthop);
+ if (unlikely(!neigh)) {
+ dev_notice(hnat_priv->dev, "%s:No neigh (daddr=%pI4)\n",
+ __func__, &ip_hdr(skb)->daddr);
+ rcu_read_unlock_bh();
+ return -1;
+ }
+ }
+ /* Copy the resolved MAC while still inside the RCU read-side
+ * critical section; neigh is not guaranteed valid afterwards.
+ */
+ entry->ipv4_dslite.dmac_hi = swab32(*((u32 *)neigh->ha));
+ entry->ipv4_dslite.dmac_lo = swab16(*((u16 *)&neigh->ha[4]));
+ rcu_read_unlock_bh();
+
+ entry->ipv4_dslite.smac_hi = swab32(*((u32 *)out->dev_addr));
+ entry->ipv4_dslite.smac_lo = swab16(*((u16 *)&out->dev_addr[4]));
+
+ return 0;
+}
+
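+/* Rebuild the flow's tuple as the PPE saw it at pre-routing (the
+ * other address family, derived via the configured prefix and
+ * address maps) in a scratch FOE entry, then derive the bucket with
+ * hnat_get_ppe_hash().
+ */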
+int mtk_464xlat_get_hash(struct sk_buff *skb, u32 *hash, bool l2w)
+{
+ struct in6_addr addr_v6, prefix;
+ struct ipv6hdr *ip6h;
+ struct iphdr *iph;
+ struct tcpudphdr *pptr, _ports;
+ struct foe_entry tmp;
+ u32 addr, protoff;
+
+ if (l2w) {
+ ip6h = ipv6_hdr(skb);
+ if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
+ return -1;
+ protoff = IPV6_HDR_LEN;
+
+ tmp.bfib1.pkt_type = IPV4_HNAPT;
+ tmp.ipv4_hnapt.sip = ntohl(ip6h->saddr.s6_addr32[3]);
+ tmp.ipv4_hnapt.dip = ntohl(addr);
+ } else {
+ iph = ip_hdr(skb);
+ if (mtk_ppe_get_xlat_v6_by_v4(&iph->saddr, &addr_v6, &prefix))
+ return -1;
+
+ protoff = iph->ihl * 4;
+
+ tmp.bfib1.pkt_type = IPV6_5T_ROUTE;
+ tmp.ipv6_5t_route.ipv6_sip0 = ntohl(addr_v6.s6_addr32[0]);
+ tmp.ipv6_5t_route.ipv6_sip1 = ntohl(addr_v6.s6_addr32[1]);
+ tmp.ipv6_5t_route.ipv6_sip2 = ntohl(addr_v6.s6_addr32[2]);
+ tmp.ipv6_5t_route.ipv6_sip3 = ntohl(addr_v6.s6_addr32[3]);
+ tmp.ipv6_5t_route.ipv6_dip0 = ntohl(prefix.s6_addr32[0]);
+ tmp.ipv6_5t_route.ipv6_dip1 = ntohl(prefix.s6_addr32[1]);
+ tmp.ipv6_5t_route.ipv6_dip2 = ntohl(prefix.s6_addr32[2]);
+ tmp.ipv6_5t_route.ipv6_dip3 = ntohl(iph->daddr);
+ }
+
+ pptr = skb_header_pointer(skb, protoff,
+ sizeof(_ports), &_ports);
+ if (unlikely(!pptr))
+ return -1;
+
+ if (l2w) {
+ tmp.ipv4_hnapt.sport = ntohs(pptr->src);
+ tmp.ipv4_hnapt.dport = ntohs(pptr->dst);
+ } else {
+ tmp.ipv6_5t_route.sport = ntohs(pptr->src);
+ tmp.ipv6_5t_route.dport = ntohs(pptr->dst);
+ }
+
+ *hash = hnat_get_ppe_hash(&tmp);
+
+ return 0;
+}
+
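+/* First FOE info block: mark the entry bound, stamp it with the FE
+ * timestamp register, and pick the tunnel packet type (DS-Lite for
+ * LAN->WAN, 6RD for WAN->LAN) plus the TCP/UDP flag.
+ */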
+void mtk_464xlat_fill_info1(struct foe_entry *entry,
+ struct sk_buff *skb, bool l2w)
+{
+ entry->bfib1.cah = 1;
+ entry->bfib1.ttl = 1;
+ entry->bfib1.state = BIND;
+ entry->bfib1.time_stamp = readl(hnat_priv->fe_base + 0x0010) & (0xFF);
+ if (l2w) {
+ entry->bfib1.pkt_type = IPV4_DSLITE;
+ entry->bfib1.udp = ipv6_hdr(skb)->nexthdr ==
+ IPPROTO_UDP ? 1 : 0;
+ } else {
+ entry->bfib1.pkt_type = IPV6_6RD;
+ entry->bfib1.udp = ip_hdr(skb)->protocol ==
+ IPPROTO_UDP ? 1 : 0;
+ }
+}
+
+void mtk_464xlat_fill_info2(struct foe_entry *entry, bool l2w)
+{
+ entry->ipv4_dslite.iblk2.mibf = 1;
+ entry->ipv4_dslite.iblk2.port_ag = 0xF;
+
+ if (l2w)
+ entry->ipv4_dslite.iblk2.dp = NR_GMAC2_PORT;
+ else
+ entry->ipv6_6rd.iblk2.dp = NR_GMAC1_PORT;
+}
+
+void mtk_464xlat_fill_ipv4(struct foe_entry *entry, struct sk_buff *skb,
+ struct foe_entry *foe, bool l2w)
+{
+ struct iphdr *iph;
+
+ if (l2w) {
+ entry->ipv4_dslite.sip = foe->ipv4_dslite.sip;
+ entry->ipv4_dslite.dip = foe->ipv4_dslite.dip;
+ entry->ipv4_dslite.sport = foe->ipv4_dslite.sport;
+ entry->ipv4_dslite.dport = foe->ipv4_dslite.dport;
+ } else {
+ iph = ip_hdr(skb);
+ entry->ipv6_6rd.tunnel_sipv4 = ntohl(iph->saddr);
+ entry->ipv6_6rd.tunnel_dipv4 = ntohl(iph->daddr);
+ entry->ipv6_6rd.sport = foe->ipv6_6rd.sport;
+ entry->ipv6_6rd.dport = foe->ipv6_6rd.dport;
+ entry->ipv6_6rd.hdr_chksum = ppe_get_chkbase(iph);
+ entry->ipv6_6rd.ttl = iph->ttl;
+ entry->ipv6_6rd.dscp = iph->tos;
+ entry->ipv6_6rd.flag = (ntohs(iph->frag_off) >> 13);
+ }
+}
+
+int mtk_464xlat_fill_ipv6(struct foe_entry *entry, struct sk_buff *skb,
+ struct foe_entry *foe, bool l2w)
+{
+ struct ipv6hdr *ip6h;
+ struct in6_addr addr_v6, prefix;
+ u32 addr;
+
+ if (l2w) {
+ ip6h = ipv6_hdr(skb);
+
+ if (mtk_ppe_get_xlat_v4_by_v6(&ip6h->daddr, &addr))
+ return -1;
+
+ if (mtk_ppe_get_xlat_v6_by_v4(&addr, &addr_v6, &prefix))
+ return -1;
+
+ entry->ipv4_dslite.tunnel_sipv6_0 =
+ ntohl(prefix.s6_addr32[0]);
+ entry->ipv4_dslite.tunnel_sipv6_1 =
+ ntohl(ip6h->saddr.s6_addr32[1]);
+ entry->ipv4_dslite.tunnel_sipv6_2 =
+ ntohl(ip6h->saddr.s6_addr32[2]);
+ entry->ipv4_dslite.tunnel_sipv6_3 =
+ ntohl(ip6h->saddr.s6_addr32[3]);
+ entry->ipv4_dslite.tunnel_dipv6_0 =
+ ntohl(ip6h->daddr.s6_addr32[0]);
+ entry->ipv4_dslite.tunnel_dipv6_1 =
+ ntohl(ip6h->daddr.s6_addr32[1]);
+ entry->ipv4_dslite.tunnel_dipv6_2 =
+ ntohl(ip6h->daddr.s6_addr32[2]);
+ entry->ipv4_dslite.tunnel_dipv6_3 =
+ ntohl(ip6h->daddr.s6_addr32[3]);
+
+ ppe_fill_flow_lbl(entry, ip6h);
+ entry->ipv4_dslite.priority = ip6h->priority;
+ entry->ipv4_dslite.hop_limit = ip6h->hop_limit;
+
+ } else {
+ entry->ipv6_6rd.ipv6_sip0 = foe->ipv6_6rd.ipv6_sip0;
+ entry->ipv6_6rd.ipv6_sip1 = foe->ipv6_6rd.ipv6_sip1;
+ entry->ipv6_6rd.ipv6_sip2 = foe->ipv6_6rd.ipv6_sip2;
+ entry->ipv6_6rd.ipv6_sip3 = foe->ipv6_6rd.ipv6_sip3;
+ entry->ipv6_6rd.ipv6_dip0 = foe->ipv6_6rd.ipv6_dip0;
+ entry->ipv6_6rd.ipv6_dip1 = foe->ipv6_6rd.ipv6_dip1;
+ entry->ipv6_6rd.ipv6_dip2 = foe->ipv6_6rd.ipv6_dip2;
+ entry->ipv6_6rd.ipv6_dip3 = foe->ipv6_6rd.ipv6_dip3;
+ }
+
+ return 0;
+}
+
+int mtk_464xlat_fill_l2(struct foe_entry *entry, struct sk_buff *skb,
+ const struct net_device *dev, bool l2w)
+{
+ const unsigned int *port_reg;
+ int port_index;
+ u16 sp_tag;
+
+ if (l2w) {
+ entry->ipv4_dslite.etype = ETH_P_IP;
+ } else {
+ if (IS_DSA_LAN(dev)) {
+ port_reg = of_get_property(dev->dev.of_node,
+ "reg", NULL);
+ if (unlikely(!port_reg))
+ return -1;
+
+ port_index = be32_to_cpup(port_reg);
+ sp_tag = BIT(port_index);
+
+ entry->bfib1.vlan_layer = 1;
+ entry->bfib1.vpm = 0;
+ entry->ipv6_6rd.etype = sp_tag;
+ } else {
+ entry->ipv6_6rd.etype = ETH_P_IPV6;
+ }
+ }
+
+ if (mtk_464xlat_fill_mac(entry, skb, dev, l2w))
+ return -1;
+
+ return 0;
}
+
+int mtk_464xlat_fill_l3(struct foe_entry *entry, struct sk_buff *skb,
+ struct foe_entry *foe, bool l2w)
+{
+ mtk_464xlat_fill_ipv4(entry, skb, foe, l2w);
+
+ if (mtk_464xlat_fill_ipv6(entry, skb, foe, l2w))
+ return -1;
+
+ return 0;
+}
+
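+/* Post-routing counterpart of mtk_464xlat_pre_process(): recompute
+ * the flow's PPE hash from the translated packet, check the state
+ * cached at pre-routing, and if the flow hit the bind threshold,
+ * build the translated FOE entry and install it directly into the
+ * PPE table.
+ */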
+int mtk_464xlat_post_process(struct sk_buff *skb, const struct net_device *out)
+{
+ struct foe_entry *foe, entry = {};
+ u32 hash;
+ bool l2w;
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ l2w = true;
+ else if (skb->protocol == htons(ETH_P_IP))
+ l2w = false;
+ else
+ return -1;
+
+ if (mtk_464xlat_get_hash(skb, &hash, l2w))
+ return -1;
+
+ if (hash >= hnat_priv->foe_etry_num)
+ return -1;
+
+ if (headroom[hash].crsn != HIT_UNBIND_RATE_REACH)
+ return -1;
+
+ foe = &hnat_priv->foe_table_cpu[headroom_ppe(headroom[hash])][hash];
+
+ mtk_464xlat_fill_info1(&entry, skb, l2w);
+
+ if (mtk_464xlat_fill_l3(&entry, skb, foe, l2w))
+ return -1;
+
+ mtk_464xlat_fill_info2(&entry, l2w);
+
+ if (mtk_464xlat_fill_l2(&entry, skb, out, l2w))
+ return -1;
+
+ /* We must ensure all info has been updated before set to hw */
+ wmb();
+ memcpy(foe, &entry, sizeof(struct foe_entry));
+
+ return 0;
+}
+
static unsigned int mtk_hnat_nf_post_routing(
struct sk_buff *skb, const struct net_device *out,
unsigned int (*fn)(struct sk_buff *, const struct net_device *,
@@ -2124,6 +2424,9 @@
.virt_dev = (struct net_device*)out };
const struct net_device *arp_dev = out;
+ if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
+ return 0;
+
if (skb_hnat_alg(skb) || unlikely(!is_magic_tag_valid(skb) ||
!IS_SPACE_AVAILABLE_HEAD(skb)))
return 0;
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
index 7cd23a2..d9eba8d 100644
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -120,6 +120,16 @@
(skb_hnat_iface(skb) == FOE_MAGIC_WED1 && CFG_PPE_NUM > 1))
#define skb_hnat_ppe(skb) \
(skb_hnat_ppe2(skb) ? 2 : (skb_hnat_ppe1(skb) ? 1 : 0))
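+/* headroom_ppe*() mirror skb_hnat_ppe*() but read the hnat descriptor
+ * cached in headroom[] at pre-routing, for skbs whose own headroom was
+ * lost in user space; headroom_ppe1() is assumed to follow the WED1 /
+ * CFG_PPE_NUM > 1 mapping of skb_hnat_ppe1() above.
+ */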
+#define headroom_iface(h) (h.iface)
+#define headroom_ppe1(h) \
+ ((headroom_iface(h) == FOE_MAGIC_GE_LAN2 || \
+ headroom_iface(h) == FOE_MAGIC_WED1) && CFG_PPE_NUM > 1)
+#define headroom_ppe2(h) \
+ ((headroom_iface(h) == FOE_MAGIC_GE_LAN2 || \
+ headroom_iface(h) == FOE_MAGIC_WED2) && CFG_PPE_NUM == 3)
+#define headroom_ppe(h) \
+ (headroom_ppe2(h) ? 2 : (headroom_ppe1(h) ? 1 : 0))
+
#define do_ext2ge_fast_try(dev, skb) \
((skb_hnat_iface(skb) == FOE_MAGIC_EXT) && !is_from_extge(skb))
#define set_from_extge(skb) (HNAT_SKB_CB2(skb)->magic = 0x78786688)