[][[Eagle][SQC3.0][BE19000][MT7996][E2][MT7976_MT7977][256][ePA][MT7988A][WiFi] [MAP][SnS][Muti-client][UES]Traffic stuck in Agent2 with invalid tid 8 found]
[Description]
Change ppe entry setup method
[Release-log]
-add spinlock protection around PPE entry setup
-change the WiFi TX entry write method from per-field writes to a single block copy
-use the hardware keep-QoS (keep DSCP/ECN) function of Jaguar
Change-Id: I59ed77456d059768315749abad1626bf35cbeb76
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/8041319
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
index a3eef58..37d5d20 100644
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -409,8 +409,13 @@
hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);
+ if (hnat_priv->data->version == MTK_HNAT_V3) {
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, TSID_EN, 1);
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, DSCP_TRFC_ECN_EN, 1);
+ }
dev_info(hnat_priv->dev, "PPE%d hwnat start\n", ppe_id);
+ spin_lock_init(&hnat_priv->entry_lock);
return 0;
}
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
index 331fce9..bd61256 100644
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -146,6 +146,7 @@
#define SCAN_MODE (0x3 << 16) /* RW */
#define XMODE (0x3 << 18) /* RW */
#define TICK_SEL (0x1 << 24) /* RW */
+#define DSCP_TRFC_ECN_EN (0x1 << 25) /* RW */
/*PPE_CAH_CTRL mask*/
@@ -183,6 +184,7 @@
/*PPE_GLO_CFG mask*/
#define PPE_EN (0x1 << 0) /* RW */
+#define TSID_EN (0x1 << 1) /* RW */
#define TTL0_DRP (0x1 << 4) /* RW */
#define MCAST_TB_EN (0x1 << 7) /* RW */
#define MCAST_HASH (0x3 << 12) /* RW */
@@ -378,7 +380,14 @@
u16 m_timestamp; /* For mcast*/
u16 resv1;
u32 resv2;
- u32 resv3 : 26;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ u32 resv3_1 : 9;
+ u32 eg_keep_ecn : 1;
+ u32 eg_keep_dscp : 1;
+ u32 resv3_2:15;
+#else
+ u32 resv3:26;
+#endif
u32 act_dp : 6; /* UDF */
u16 vlan1;
u16 etype;
@@ -433,7 +442,14 @@
u8 flow_lbl[3]; /* in order to consist with Linux kernel (should be 20bits) */
u8 priority; /* in order to consist with Linux kernel (should be 8bits) */
u32 hop_limit : 8;
- u32 resv2 : 18;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ u32 resv2_1 : 1;
+ u32 eg_keep_ecn : 1;
+ u32 eg_keep_cls : 1;
+ u32 resv2_2 : 15;
+#else
+ u32 resv2 : 18;
+#endif
u32 act_dp : 6; /* UDF */
union {
@@ -495,7 +511,14 @@
u8 flow_lbl[3]; /* in order to consist with Linux kernel (should be 20bits) */
u8 priority; /* in order to consist with Linux kernel (should be 8bits) */
u32 hop_limit : 8;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ u32 resv2_1 : 1;
+ u32 eg_keep_ecn : 1;
+ u32 eg_keep_dscp : 1;
+ u32 resv2_2 : 15;
+#else
u32 resv2 : 18;
+#endif
u32 act_dp : 6; /* UDF */
union {
@@ -561,7 +584,14 @@
u32 resv1;
u32 resv2;
u32 resv3;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ u32 resv4_1 : 9;
+ u32 eg_keep_ecn : 1;
+ u32 eg_keep_cls : 1;
+ u32 resv4_2 : 15;
+#else
u32 resv4 : 26;
+#endif
u32 act_dp : 6; /* UDF */
union {
@@ -618,7 +648,14 @@
u32 resv1;
u32 resv2;
u32 resv3;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ u32 resv4_1 : 9;
+ u32 eg_keep_ecn : 1;
+ u32 eg_keep_cls : 1;
+ u32 resv4_2 : 15;
+#else
u32 resv4 : 26;
+#endif
u32 act_dp : 6; /* UDF */
union {
@@ -679,9 +716,19 @@
u32 dscp : 8;
u32 ttl : 8;
u32 flag : 3;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ u32 resv1_1 : 6;
+ u32 eg_keep_ecn : 1;
+ u32 eg_keep_cls : 1;
+ u32 eg_keep_tnl_qos : 1;
+ u32 resv1_2 : 4;
+ u32 per_flow_6rd_id : 1;
+ u32 resv2 : 9;
+#else
u32 resv1 : 13;
u32 per_flow_6rd_id : 1;
u32 resv2 : 9;
+#endif
u32 act_dp : 6; /* UDF */
union {
@@ -904,6 +951,7 @@
struct timer_list hnat_mcast_check_timer;
bool nf_stat_en;
struct xlat_conf xlat;
+ spinlock_t entry_lock;
};
struct extdev_entry {
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
index f648386..fc7d216 100644
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
@@ -917,7 +917,7 @@
entry = h->foe_table_cpu[ppe_id];
end = h->foe_table_cpu[ppe_id] + hnat_priv->foe_etry_num;
while (entry < end) {
- if (!entry->bfib1.state) {
+ if ((!entry->bfib1.state) && (debug_level < 7)) {
entry++;
entry_index++;
continue;
diff --git a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
index d34ac9d..ef3208d 100644
--- a/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+++ b/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -1284,6 +1284,10 @@
ntohs(pptr->src);
entry.ipv4_mape.new_dport =
ntohs(pptr->dst);
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv4_mape.eg_keep_ecn = 1;
+ entry.ipv4_mape.eg_keep_dscp = 1;
+#endif
}
#endif
@@ -1311,6 +1315,11 @@
if (hnat_priv->data->per_flow_accounting)
entry.ipv4_dslite.iblk2.mibf = 1;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv4_dslite.eg_keep_ecn = 1;
+ entry.ipv4_dslite.eg_keep_cls = 1;
+#endif
+
} else {
entry.ipv4_hnapt.iblk2.dscp = iph->tos;
if (hnat_priv->data->per_flow_accounting)
@@ -1350,6 +1359,11 @@
entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
}
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv4_hnapt.eg_keep_ecn = 1;
+ entry.ipv4_hnapt.eg_keep_dscp = 1;
+#endif
+
break;
default:
@@ -1393,6 +1407,11 @@
foe->ipv6_6rd.tunnel_sipv4;
entry.ipv6_6rd.tunnel_dipv4 =
foe->ipv6_6rd.tunnel_dipv4;
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv6_6rd.eg_keep_ecn = 1;
+ entry.ipv6_6rd.eg_keep_cls = 1;
+#endif
}
entry.ipv6_3t_route.ipv6_sip0 =
@@ -1418,6 +1437,10 @@
foe->ipv6_3t_route.prot;
entry.ipv6_3t_route.hph =
foe->ipv6_3t_route.hph;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv6_3t_route.eg_keep_ecn = 1;
+ entry.ipv6_3t_route.eg_keep_cls = 1;
+#endif
}
if (IS_IPV6_5T_ROUTE(foe) || IS_IPV6_6RD(foe)) {
@@ -1425,6 +1448,10 @@
foe->ipv6_5t_route.sport;
entry.ipv6_5t_route.dport =
foe->ipv6_5t_route.dport;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv6_5t_route.eg_keep_ecn = 1;
+ entry.ipv6_5t_route.eg_keep_cls = 1;
+#endif
}
if (ct && (ct->status & IPS_SRC_NAT)) {
@@ -1463,6 +1490,10 @@
entry.ipv6_hnapt.new_sport = ntohs(pptr->src);
entry.ipv6_hnapt.new_dport = ntohs(pptr->dst);
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv6_hnapt.eg_keep_ecn = 1;
+ entry.ipv6_hnapt.eg_keep_cls = 1;
+#endif
#else
return -1;
#endif
@@ -1518,6 +1549,10 @@
entry.ipv4_dslite.vlan1 = hw_path->vlan_id;
if (hnat_priv->data->per_flow_accounting)
entry.ipv4_dslite.iblk2.mibf = 1;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv4_dslite.eg_keep_ecn = 1;
+ entry.ipv4_dslite.eg_keep_cls = 1;
+#endif
/* Map-E LAN->WAN record inner IPv4 header info. */
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
if (mape_toggle) {
@@ -1526,6 +1561,10 @@
entry.ipv4_mape.new_dip = foe->ipv4_mape.new_dip;
entry.ipv4_mape.new_sport = foe->ipv4_mape.new_sport;
entry.ipv4_mape.new_dport = foe->ipv4_mape.new_dport;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv4_mape.eg_keep_ecn = 1;
+ entry.ipv4_mape.eg_keep_dscp = 1;
+#endif
}
#endif
} else if (mape_toggle &&
@@ -1582,6 +1621,10 @@
entry.ipv4_hnapt.new_dport =
foe->ipv4_hnapt.new_dport;
mape_l2w_v6h = *ip6h;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv4_hnapt.eg_keep_ecn = 1;
+ entry.ipv4_hnapt.eg_keep_dscp = 1;
+#endif
}
break;
@@ -1621,6 +1664,11 @@
entry.ipv6_6rd.vlan1 = hw_path->vlan_id;
if (hnat_priv->data->per_flow_accounting)
entry.ipv6_6rd.iblk2.mibf = 1;
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ entry.ipv6_6rd.eg_keep_ecn = 1;
+ entry.ipv6_6rd.eg_keep_cls = 1;
+#endif
break;
default:
@@ -1823,9 +1871,8 @@
int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
{
- struct foe_entry *entry;
+ struct foe_entry *hw_entry, entry;
struct ethhdr *eth;
- struct hnat_bind_info_blk bfib1_tx;
if (skb_hnat_alg(skb) ||
!is_magic_tag_valid(skb) || !IS_SPACE_AVAILABLE_HEAD(skb))
@@ -1851,38 +1898,39 @@
skb_hnat_ppe(skb) >= CFG_PPE_NUM)
return NF_ACCEPT;
- entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
- if (entry_hnat_is_bound(entry))
+ hw_entry = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
+ memcpy(&entry, hw_entry, sizeof(entry));
+ if (entry.bfib1.state == BIND)
return NF_ACCEPT;
if (skb_hnat_reason(skb) != HIT_UNBIND_RATE_REACH)
return NF_ACCEPT;
- if (!is_hnat_pre_filled(entry))
+ if (!is_hnat_pre_filled(&entry))
return NF_ACCEPT;
eth = eth_hdr(skb);
- memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
/*not bind multicast if PPE mcast not enable*/
if (!hnat_priv->data->mcast) {
if (is_multicast_ether_addr(eth->h_dest))
return NF_ACCEPT;
- if (IS_IPV4_GRP(entry))
- entry->ipv4_hnapt.iblk2.mcast = 0;
+ if (IS_IPV4_GRP(&entry))
+ entry.ipv4_hnapt.iblk2.mcast = 0;
else
- entry->ipv6_5t_route.iblk2.mcast = 0;
+ entry.ipv6_5t_route.iblk2.mcast = 0;
}
+ spin_lock(&hnat_priv->entry_lock);
/* Some mt_wifi virtual interfaces, such as apcli,
* will change the smac for specail purpose.
*/
- switch ((int)bfib1_tx.pkt_type) {
+ switch ((int)entry.bfib1.pkt_type) {
case IPV4_HNAPT:
case IPV4_HNAT:
- entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
- entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)ð->h_source[4]));
+ entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+ entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)ð->h_source[4]));
break;
case IPV4_DSLITE:
case IPV4_MAP_E:
@@ -1891,167 +1939,221 @@
case IPV6_3T_ROUTE:
case IPV6_HNAPT:
case IPV6_HNAT:
- entry->ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
- entry->ipv6_5t_route.smac_lo = swab16(*((u16 *)ð->h_source[4]));
+ entry.ipv6_5t_route.smac_hi = swab32(*((u32 *)eth->h_source));
+ entry.ipv6_5t_route.smac_lo = swab16(*((u16 *)ð->h_source[4]));
break;
}
if (skb_vlan_tagged(skb)) {
- bfib1_tx.vlan_layer = 1;
- bfib1_tx.vpm = 1;
- if (IS_IPV4_GRP(entry)) {
- entry->ipv4_hnapt.etype = htons(ETH_P_8021Q);
- entry->ipv4_hnapt.vlan1 = skb->vlan_tci;
- } else if (IS_IPV6_GRP(entry)) {
- entry->ipv6_5t_route.etype = htons(ETH_P_8021Q);
- entry->ipv6_5t_route.vlan1 = skb->vlan_tci;
+ entry.bfib1.vlan_layer = 1;
+ entry.bfib1.vpm = 1;
+ if (IS_IPV4_GRP(&entry)) {
+ entry.ipv4_hnapt.etype = htons(ETH_P_8021Q);
+ entry.ipv4_hnapt.vlan1 = skb->vlan_tci;
+ } else if (IS_IPV6_GRP(&entry)) {
+ entry.ipv6_5t_route.etype = htons(ETH_P_8021Q);
+ entry.ipv6_5t_route.vlan1 = skb->vlan_tci;
}
} else {
- bfib1_tx.vpm = 0;
- bfib1_tx.vlan_layer = 0;
+ entry.bfib1.vpm = 0;
+ entry.bfib1.vlan_layer = 0;
}
/* MT7622 wifi hw_nat not support QoS */
- if (IS_IPV4_GRP(entry)) {
- entry->ipv4_hnapt.iblk2.fqos = 0;
+ if (IS_IPV4_GRP(&entry)) {
+ entry.ipv4_hnapt.iblk2.fqos = 0;
if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
gmac_no == NR_WHNAT_WDMA_PORT) ||
((hnat_priv->data->version == MTK_HNAT_V2 ||
hnat_priv->data->version == MTK_HNAT_V3) &&
(gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT ||
gmac_no == NR_WDMA2_PORT))) {
- entry->ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
- entry->ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
+ entry.ipv4_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
+ entry.ipv4_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
- entry->ipv4_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
- entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
- entry->ipv4_hnapt.iblk2.winfoi = 1;
- entry->ipv4_hnapt.winfo_pao.usr_info =
+ entry.ipv4_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
+ entry.ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
+ entry.ipv4_hnapt.iblk2.winfoi = 1;
+ entry.ipv4_hnapt.winfo_pao.usr_info =
skb_hnat_usr_info(skb);
- entry->ipv4_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
- entry->ipv4_hnapt.winfo_pao.is_fixedrate =
+ entry.ipv4_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
+ entry.ipv4_hnapt.winfo_pao.is_fixedrate =
skb_hnat_is_fixedrate(skb);
- entry->ipv4_hnapt.winfo_pao.is_prior =
+ entry.ipv4_hnapt.winfo_pao.is_prior =
skb_hnat_is_prior(skb);
- entry->ipv4_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
- entry->ipv4_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
- entry->ipv4_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
+ entry.ipv4_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
+ entry.ipv4_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
+ entry.ipv4_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
- entry->ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
- entry->ipv4_hnapt.iblk2.winfoi = 1;
+ entry.ipv4_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
+ entry.ipv4_hnapt.iblk2.winfoi = 1;
#else
- entry->ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
- entry->ipv4_hnapt.iblk2w.winfoi = 1;
- entry->ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
+ entry.ipv4_hnapt.winfo.rxid = skb_hnat_rx_id(skb);
+ entry.ipv4_hnapt.iblk2w.winfoi = 1;
+ entry.ipv4_hnapt.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
} else {
if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
- bfib1_tx.vpm = 1;
- bfib1_tx.vlan_layer = 1;
+ entry.bfib1.vpm = 1;
+ entry.bfib1.vlan_layer = 1;
if (FROM_GE_LAN_GRP(skb))
- entry->ipv4_hnapt.vlan1 = 1;
+ entry.ipv4_hnapt.vlan1 = 1;
else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
- entry->ipv4_hnapt.vlan1 = 2;
+ entry.ipv4_hnapt.vlan1 = 2;
}
if (IS_HQOS_MODE &&
(FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
- bfib1_tx.vpm = 0;
- bfib1_tx.vlan_layer = 1;
- entry->ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
- entry->ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
- entry->ipv4_hnapt.iblk2.fqos = 1;
+ entry.bfib1.vpm = 0;
+ entry.bfib1.vlan_layer = 1;
+ entry.ipv4_hnapt.etype = htons(HQOS_MAGIC_TAG);
+ entry.ipv4_hnapt.vlan1 = skb_hnat_entry(skb);
+ entry.ipv4_hnapt.iblk2.fqos = 1;
}
}
- entry->ipv4_hnapt.iblk2.dp = gmac_no;
+ entry.ipv4_hnapt.iblk2.dp = gmac_no;
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
- } else if (IS_IPV6_HNAPT(entry) || IS_IPV6_HNAT(entry)) {
- entry->ipv6_hnapt.iblk2.dp = gmac_no;
- entry->ipv6_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
- entry->ipv6_hnapt.iblk2.winfoi = 1;
+ } else if (IS_IPV6_HNAPT(&entry) || IS_IPV6_HNAT(&entry)) {
+ entry.ipv6_hnapt.iblk2.dp = gmac_no;
+ entry.ipv6_hnapt.iblk2.rxid = skb_hnat_rx_id(skb);
+ entry.ipv6_hnapt.iblk2.winfoi = 1;
- entry->ipv6_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
- entry->ipv6_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
- entry->ipv6_hnapt.winfo_pao.usr_info = skb_hnat_usr_info(skb);
- entry->ipv6_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
- entry->ipv6_hnapt.winfo_pao.is_fixedrate =
+ entry.ipv6_hnapt.winfo.bssid = skb_hnat_bss_id(skb);
+ entry.ipv6_hnapt.winfo.wcid = skb_hnat_wc_id(skb);
+ entry.ipv6_hnapt.winfo_pao.usr_info = skb_hnat_usr_info(skb);
+ entry.ipv6_hnapt.winfo_pao.tid = skb_hnat_tid(skb);
+ entry.ipv6_hnapt.winfo_pao.is_fixedrate =
skb_hnat_is_fixedrate(skb);
- entry->ipv6_hnapt.winfo_pao.is_prior = skb_hnat_is_prior(skb);
- entry->ipv6_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
- entry->ipv6_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
- entry->ipv6_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
- entry->ipv6_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
+ entry.ipv6_hnapt.winfo_pao.is_prior = skb_hnat_is_prior(skb);
+ entry.ipv6_hnapt.winfo_pao.is_sp = skb_hnat_is_sp(skb);
+ entry.ipv6_hnapt.winfo_pao.hf = skb_hnat_hf(skb);
+ entry.ipv6_hnapt.winfo_pao.amsdu = skb_hnat_amsdu(skb);
+ entry.ipv6_hnapt.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
#endif
} else {
- entry->ipv6_5t_route.iblk2.fqos = 0;
+ entry.ipv6_5t_route.iblk2.fqos = 0;
if ((hnat_priv->data->version == MTK_HNAT_V1_2 &&
gmac_no == NR_WHNAT_WDMA_PORT) ||
((hnat_priv->data->version == MTK_HNAT_V2 ||
hnat_priv->data->version == MTK_HNAT_V3) &&
(gmac_no == NR_WDMA0_PORT || gmac_no == NR_WDMA1_PORT ||
gmac_no == NR_WDMA2_PORT))) {
- entry->ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
- entry->ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
+ entry.ipv6_5t_route.winfo.bssid = skb_hnat_bss_id(skb);
+ entry.ipv6_5t_route.winfo.wcid = skb_hnat_wc_id(skb);
#if defined(CONFIG_MEDIATEK_NETSYS_V3)
- entry->ipv6_5t_route.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
- entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
- entry->ipv6_5t_route.iblk2.winfoi = 1;
- entry->ipv6_5t_route.winfo_pao.usr_info =
+ entry.ipv6_5t_route.tport_id = IS_HQOS_DL_MODE ? 1 : 0;
+ entry.ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
+ entry.ipv6_5t_route.iblk2.winfoi = 1;
+ entry.ipv6_5t_route.winfo_pao.usr_info =
skb_hnat_usr_info(skb);
- entry->ipv6_5t_route.winfo_pao.tid =
+ entry.ipv6_5t_route.winfo_pao.tid =
skb_hnat_tid(skb);
- entry->ipv6_5t_route.winfo_pao.is_fixedrate =
+ entry.ipv6_5t_route.winfo_pao.is_fixedrate =
skb_hnat_is_fixedrate(skb);
- entry->ipv6_5t_route.winfo_pao.is_prior =
+ entry.ipv6_5t_route.winfo_pao.is_prior =
skb_hnat_is_prior(skb);
- entry->ipv6_5t_route.winfo_pao.is_sp =
+ entry.ipv6_5t_route.winfo_pao.is_sp =
skb_hnat_is_sp(skb);
- entry->ipv6_5t_route.winfo_pao.hf =
+ entry.ipv6_5t_route.winfo_pao.hf =
skb_hnat_hf(skb);
- entry->ipv6_5t_route.winfo_pao.amsdu =
+ entry.ipv6_5t_route.winfo_pao.amsdu =
skb_hnat_amsdu(skb);
#elif defined(CONFIG_MEDIATEK_NETSYS_V2)
- entry->ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
- entry->ipv6_5t_route.iblk2.winfoi = 1;
+ entry.ipv6_5t_route.iblk2.rxid = skb_hnat_rx_id(skb);
+ entry.ipv6_5t_route.iblk2.winfoi = 1;
#else
- entry->ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
- entry->ipv6_5t_route.iblk2w.winfoi = 1;
- entry->ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
+ entry.ipv6_5t_route.winfo.rxid = skb_hnat_rx_id(skb);
+ entry.ipv6_5t_route.iblk2w.winfoi = 1;
+ entry.ipv6_5t_route.iblk2w.wdmaid = skb_hnat_wdma_id(skb);
#endif
} else {
if (IS_GMAC1_MODE && !hnat_dsa_is_enable(hnat_priv)) {
- bfib1_tx.vpm = 1;
- bfib1_tx.vlan_layer = 1;
+ entry.bfib1.vpm = 1;
+ entry.bfib1.vlan_layer = 1;
if (FROM_GE_LAN_GRP(skb))
- entry->ipv6_5t_route.vlan1 = 1;
+ entry.ipv6_5t_route.vlan1 = 1;
else if (FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))
- entry->ipv6_5t_route.vlan1 = 2;
+ entry.ipv6_5t_route.vlan1 = 2;
}
if (IS_HQOS_MODE &&
(FROM_GE_LAN_GRP(skb) || FROM_GE_WAN(skb) || FROM_GE_VIRTUAL(skb))) {
- bfib1_tx.vpm = 0;
- bfib1_tx.vlan_layer = 1;
- entry->ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
- entry->ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
- entry->ipv6_5t_route.iblk2.fqos = 1;
+ entry.bfib1.vpm = 0;
+ entry.bfib1.vlan_layer = 1;
+ entry.ipv6_5t_route.etype = htons(HQOS_MAGIC_TAG);
+ entry.ipv6_5t_route.vlan1 = skb_hnat_entry(skb);
+ entry.ipv6_5t_route.iblk2.fqos = 1;
}
}
- entry->ipv6_5t_route.iblk2.dp = gmac_no;
+ entry.ipv6_5t_route.iblk2.dp = gmac_no;
}
- bfib1_tx.ttl = 1;
- bfib1_tx.state = BIND;
+ entry.bfib1.ttl = 1;
+ entry.bfib1.state = BIND;
+ if (IS_IPV4_GRP(&entry))
+ entry.ipv4_hnapt.act_dp &= ~UDF_HNAT_PRE_FILLED;
+ else
+ entry.ipv6_5t_route.act_dp &= ~UDF_HNAT_PRE_FILLED;
+ /* We must ensure all info has been updated before set to hw */
wmb();
- memcpy(&entry->bfib1, &bfib1_tx, sizeof(bfib1_tx));
+ memcpy(hw_entry, &entry, sizeof(entry));
- if (IS_IPV4_GRP(entry))
- entry->ipv4_hnapt.act_dp &= ~UDF_HNAT_PRE_FILLED;
- else
- entry->ipv6_5t_route.act_dp &= ~UDF_HNAT_PRE_FILLED;
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ if (debug_level >= 7) {
+ pr_info("%s %d dp:%d rxid:%d tid:%d usr_info:%d bssid:%d wcid:%d hsh-idx:%d sp:%d\n",
+ __func__, __LINE__,
+ gmac_no, skb_hnat_rx_id(skb), skb_hnat_tid(skb),
+ skb_hnat_usr_info(skb), skb_hnat_bss_id(skb),
+ skb_hnat_wc_id(skb), skb_hnat_entry(skb),
+ skb_hnat_sport(skb));
+ if (IS_IPV4_GRP(&entry)) {
+ pr_info("%s %d dp:%d rxid:%d tid:%d uinfo:%d bssid:%d wcid:%d hsh-idx:%d sp:%d\n",
+ __func__, __LINE__,
+ (hw_entry->ipv4_hnapt.iblk2.dp),
+ (hw_entry->ipv4_hnapt.iblk2.rxid),
+ (hw_entry->ipv4_hnapt.winfo_pao.tid),
+ (hw_entry->ipv4_hnapt.winfo_pao.usr_info),
+ (hw_entry->ipv4_hnapt.winfo.bssid),
+ (hw_entry->ipv4_hnapt.winfo.wcid),
+ skb_hnat_entry(skb), skb_hnat_sport(skb));
+ pr_info("%s %d dip:%x sip:%x dp:%x sp:%x hsh-idx:%d\n",
+ __func__, __LINE__,
+ hw_entry->ipv4_hnapt.dip, hw_entry->ipv4_hnapt.sip,
+ hw_entry->ipv4_hnapt.dport, hw_entry->ipv4_hnapt.sport,
+ skb_hnat_entry(skb));
+ pr_info("%s %d new_dip:%x new_sip:%x new_dp:%x new_sp:%x hsh-idx:%d\n",
+ __func__, __LINE__,
+ hw_entry->ipv4_hnapt.new_dip, hw_entry->ipv4_hnapt.new_sip,
+ hw_entry->ipv4_hnapt.new_dport,
+ hw_entry->ipv4_hnapt.new_sport, skb_hnat_entry(skb));
+ } else {
+ pr_info("%s %d dp:%d rxid:%d tid:%d uinfo:%d bssid:%d wcid:%d hidx:%d sp:%d\n",
+ __func__, __LINE__,
+ (hw_entry->ipv6_5t_route.iblk2.dp),
+ (hw_entry->ipv6_5t_route.iblk2.rxid),
+ (hw_entry->ipv6_5t_route.winfo_pao.tid),
+ (hw_entry->ipv6_5t_route.winfo_pao.usr_info),
+ (hw_entry->ipv6_5t_route.winfo.bssid),
+ (hw_entry->ipv6_5t_route.winfo.wcid),
+ skb_hnat_entry(skb), skb_hnat_sport(skb));
+ pr_info("sip:%x-:%x-:%x-:%x dip0:%x-:%x-:%x-:%x dport:%x sport:%x\n",
+ hw_entry->ipv6_5t_route.ipv6_sip0,
+ hw_entry->ipv6_5t_route.ipv6_sip1,
+ hw_entry->ipv6_5t_route.ipv6_sip2,
+ hw_entry->ipv6_5t_route.ipv6_sip3,
+ hw_entry->ipv6_5t_route.ipv6_dip0,
+ hw_entry->ipv6_5t_route.ipv6_dip1,
+ hw_entry->ipv6_5t_route.ipv6_dip2,
+ hw_entry->ipv6_5t_route.ipv6_dip3,
+ hw_entry->ipv6_5t_route.dport,
+ hw_entry->ipv6_5t_route.sport);
+ }
+ }
+#endif
+ spin_unlock(&hnat_priv->entry_lock);
return NF_ACCEPT;
}
@@ -2190,8 +2292,9 @@
}
if (flag) {
- if (debug_level >= 2)
- pr_info("Delete entry idx=%d.\n", skb_hnat_entry(skb));
+ if (debug_level >= 7)
+ pr_info("%s %d Delete entry idx=%d\n", __func__, __LINE__,
+ skb_hnat_entry(skb));
memset(entry, 0, sizeof(struct foe_entry));
hnat_cache_ebl(1);
}
@@ -2553,7 +2656,9 @@
if (fn && fn(skb, arp_dev, &hw_path))
break;
+ spin_lock(&hnat_priv->entry_lock);
skb_to_hnat_info(skb, out, entry, &hw_path);
+ spin_unlock(&hnat_priv->entry_lock);
break;
case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
/* update hnat count to nf_conntrack by keepalive */
@@ -2564,7 +2669,8 @@
break;
/* update dscp for qos */
- mtk_hnat_dscp_update(skb, entry);
+ if (hnat_priv->data->version != MTK_HNAT_V3)
+ mtk_hnat_dscp_update(skb, entry);
/* update mcast timestamp*/
if (hnat_priv->data->version == MTK_HNAT_V1_3 &&
diff --git a/target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch
index 57a1dde..299c7b4 100644
--- a/target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch
+++ b/target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch
@@ -1,497 +1,489 @@
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -245,6 +245,9 @@ static const char * const mtk_clks_sourc
- "top_netsys_warp_sel",
- };
-
-+struct net_device *(*mtk_get_tnl_dev)(int tnl_idx) = NULL;
-+EXPORT_SYMBOL(mtk_get_tnl_dev);
-+
- void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
- {
- __raw_writel(val, eth->base + reg);
-@@ -2171,6 +2174,7 @@ static int mtk_poll_rx(struct napi_struc
- u64 addr64 = 0;
- u8 *data, *new_data;
- struct mtk_rx_dma_v2 *rxd, trxd;
-+ int tnl_idx = 0;
- int done = 0;
-
- if (unlikely(!ring))
-@@ -2214,11 +2218,20 @@ static int mtk_poll_rx(struct napi_struc
- 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
- }
-
-- if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-- !eth->netdev[mac]))
-- goto release_desc;
-+ tnl_idx = RX_DMA_GET_TOPS_CRSN(trxd.rxd6);
-+ if (mtk_get_tnl_dev && tnl_idx) {
-+ netdev = mtk_get_tnl_dev(tnl_idx);
-+ if (unlikely(IS_ERR(netdev)))
-+ netdev = NULL;
-+ }
-
-- netdev = eth->netdev[mac];
-+ if (!netdev) {
-+ if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-+ !eth->netdev[mac]))
-+ goto release_desc;
-+
-+ netdev = eth->netdev[mac];
-+ }
-
- if (unlikely(test_bit(MTK_RESETTING, ð->state)))
- goto release_desc;
-@@ -2303,6 +2316,8 @@ static int mtk_poll_rx(struct napi_struc
- skb_hnat_alg(skb) = 0;
- skb_hnat_filled(skb) = 0;
- skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
-+ skb_hnat_set_tops(skb, 0);
-+ skb_hnat_set_is_decap(skb, 0);
-
- if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
- trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
-@@ -43,6 +43,12 @@ void (*ppe_dev_register_hook)(struct net
- EXPORT_SYMBOL(ppe_dev_register_hook);
- void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
- EXPORT_SYMBOL(ppe_dev_unregister_hook);
-+int (*mtk_tnl_encap_offload)(struct sk_buff *skb) = NULL;
-+EXPORT_SYMBOL(mtk_tnl_encap_offload);
-+int (*mtk_tnl_decap_offload)(struct sk_buff *skb) = NULL;
-+EXPORT_SYMBOL(mtk_tnl_decap_offload);
-+bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
-+EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
-
- static void hnat_sma_build_entry(struct timer_list *t)
- {
-@@ -53,6 +59,16 @@ static void hnat_sma_build_entry(struct
- SMA, SMA_FWD_CPU_BUILD_ENTRY);
- }
-
-+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index)
-+{
-+ if (index == 0x7fff || index >= hnat_priv->foe_etry_num
-+ || ppe_id >= CFG_PPE_NUM)
-+ return ERR_PTR(-EINVAL);
-+
-+ return &hnat_priv->foe_table_cpu[ppe_id][index];
-+}
-+EXPORT_SYMBOL(hnat_get_foe_entry);
-+
- void hnat_cache_ebl(int enable)
- {
- int i;
-@@ -63,6 +79,7 @@ void hnat_cache_ebl(int enable)
- cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
- }
- }
-+EXPORT_SYMBOL(hnat_cache_ebl);
-
- static void hnat_reset_timestamp(struct timer_list *t)
- {
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
-@@ -1087,6 +1087,8 @@ enum FoeIpAct {
- #define NR_WDMA1_PORT 9
- #define NR_WDMA2_PORT 13
- #define NR_GMAC3_PORT 15
-+#define NR_TDMA_TPORT 4
-+#define NR_TDMA_QDMA_TPORT 5
- #define LAN_DEV_NAME hnat_priv->lan
- #define LAN2_DEV_NAME hnat_priv->lan2
- #define IS_WAN(dev) \
-@@ -1210,6 +1212,8 @@ static inline bool hnat_dsa_is_enable(st
- }
- #endif
-
-+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index);
-+
- void hnat_deinit_debugfs(struct mtk_hnat *h);
- int hnat_init_debugfs(struct mtk_hnat *h);
- int hnat_register_nf_hooks(void);
-@@ -1226,6 +1230,9 @@ extern int qos_ul_toggle;
- extern int hook_toggle;
- extern int mape_toggle;
- extern int qos_toggle;
-+extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
-+extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
-+extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
-
- int ext_if_add(struct extdev_entry *ext_entry);
- int ext_if_del(struct extdev_entry *ext_entry);
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-@@ -726,10 +726,14 @@ static unsigned int is_ppe_support_type(
- case ETH_P_IP:
- iph = ip_hdr(skb);
-
-- /* do not accelerate non tcp/udp traffic */
-- if ((iph->protocol == IPPROTO_TCP) ||
-+ if (mtk_tnl_decap_offloadable && mtk_tnl_decap_offloadable(skb)) {
-+ /* tunnel protocol is offloadable */
-+ skb_hnat_set_is_decap(skb, 1);
-+ return 1;
-+ } else if ((iph->protocol == IPPROTO_TCP) ||
- (iph->protocol == IPPROTO_UDP) ||
- (iph->protocol == IPPROTO_IPV6)) {
-+ /* do not accelerate non tcp/udp traffic */
- return 1;
- }
-
-@@ -846,6 +850,13 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv,
-
- hnat_set_head_frags(state, skb, -1, hnat_set_iif);
-
-+ if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
-+ && is_magic_tag_valid(skb)
-+ && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
-+ && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
-+ return NF_ACCEPT;
-+ }
-+
- /*
- * Avoid mistakenly binding of outer IP, ports in SW L2TP decap flow.
- * In pre-routing, if dev is virtual iface, TOPS module is not loaded,
-@@ -922,6 +933,13 @@ mtk_hnat_br_nf_local_in(void *priv, stru
-
- hnat_set_head_frags(state, skb, -1, hnat_set_iif);
-
-+ if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
-+ && is_magic_tag_valid(skb)
-+ && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
-+ && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
-+ return NF_ACCEPT;
-+ }
-+
- pre_routing_print(skb, state->in, state->out, __func__);
-
- if (unlikely(debug_level >= 7)) {
-@@ -1074,9 +1092,22 @@ static unsigned int hnat_ipv4_get_nextho
- return -1;
- }
-
-+ /*
-+ * if this packet is a tunnel packet and is about to construct
-+ * outer header, we must update its outer mac header pointer
-+ * before filling outer mac or it may screw up inner mac
-+ */
-+ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
-+ skb_push(skb, sizeof(struct ethhdr));
-+ skb_reset_mac_header(skb);
-+ }
-+
- memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
- memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
-
-+ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
-+ skb_pull(skb, sizeof(struct ethhdr));
-+
- rcu_read_unlock_bh();
-
- return 0;
-@@ -1202,6 +1233,81 @@ static struct ethhdr *get_ipv6_ipip_ethh
- return eth;
- }
-
-+static inline void hnat_get_filled_unbind_entry(struct sk_buff *skb,
-+ struct foe_entry *entry)
-+{
-+ if (unlikely(!skb || !entry))
-+ return;
-+
-+ memcpy(entry,
-+ &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
-+ sizeof(*entry));
-+
-+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
-+ entry->bfib1.mc = 0;
-+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3) */
-+ entry->bfib1.ka = 0;
-+ entry->bfib1.vlan_layer = 0;
-+ entry->bfib1.psn = 0;
-+ entry->bfib1.vpm = 0;
-+ entry->bfib1.ps = 0;
-+}
-+
-+static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
-+{
-+ u32 cfg;
-+ u32 max_man = 0;
-+ u32 max_exp = 0;
-+ const struct mtk_mac *mac;
-+
-+ if (!dev)
-+ return;
-+ mac = netdev_priv(dev);
-+
-+ switch (mac->speed) {
-+ case SPEED_100:
-+ case SPEED_1000:
-+ case SPEED_2500:
-+ case SPEED_5000:
-+ case SPEED_10000:
-+ max_man = mac->speed / SPEED_100;
-+ max_exp = 5;
-+ break;
-+ default:
-+ return;
-+ }
-+
-+ cfg = QTX_SCH_MIN_RATE_EN | QTX_SCH_MAX_RATE_EN;
-+ cfg |= (1 << QTX_SCH_MIN_RATE_MAN_OFFSET) |
-+ (4 << QTX_SCH_MIN_RATE_EXP_OFFSET) |
-+ (max_man << QTX_SCH_MAX_RATE_MAN_OFFSET) |
-+ (max_exp << QTX_SCH_MAX_RATE_EXP_OFFSET) |
-+ (4 << QTX_SCH_MAX_RATE_WGHT_OFFSET);
-+ writel(cfg, hnat_priv->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
-+}
-+
-+static inline void hnat_fill_offload_engine_entry(struct sk_buff *skb,
-+ struct foe_entry *entry,
-+ const struct net_device *dev)
-+{
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
-+ /*
-+ * if skb_hnat_tops(skb) is setup for encapsulation,
-+ * we fill in hnat tport and tops_entry for tunnel encapsulation
-+ * offloading
-+ */
-+ entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
-+ entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
-+ } else {
-+ return;
-+ }
-+
-+ entry->ipv4_hnapt.iblk2.qid = 12; /* offload engine use QID 12 */
-+ hnat_qos_tnl(12, dev); /* set rate limit to line rate */
-+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
-+}
-+
- static unsigned int skb_to_hnat_info(struct sk_buff *skb,
- const struct net_device *dev,
- struct foe_entry *foe,
-@@ -1238,6 +1344,11 @@ static unsigned int skb_to_hnat_info(str
- if (whnat && is_hnat_pre_filled(foe))
- return 0;
-
-+ if (skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)) {
-+ hnat_get_filled_unbind_entry(skb, &entry);
-+ goto hnat_entry_bind;
-+ }
-+
- entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
- entry.bfib1.state = foe->udib1.state;
-
-@@ -1633,6 +1744,10 @@ static unsigned int skb_to_hnat_info(str
- /* Fill Layer2 Info.*/
- entry = ppe_fill_L2_info(eth, entry, hw_path);
-
-+ if (skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
-+ goto hnat_entry_skip_bind;
-+
-+hnat_entry_bind:
- /* Fill Info Blk*/
- entry = ppe_fill_info_blk(eth, entry, hw_path);
-
-@@ -1833,7 +1948,20 @@ static unsigned int skb_to_hnat_info(str
- entry.ipv6_5t_route.act_dp |= UDF_HNAT_PRE_FILLED;
- }
-
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+ hnat_fill_offload_engine_entry(skb, &entry, dev);
-+#endif
-+
-+hnat_entry_skip_bind:
- wmb();
-+
-+ /*
-+ * final check before we write BIND info.
-+ * If this entry is already bound, we should not modify it right now
-+ */
-+ if (entry_hnat_is_bound(foe))
-+ return 0;
-+
- memcpy(foe, &entry, sizeof(entry));
- /*reset statistic for this entry*/
- if (hnat_priv->data->per_flow_accounting &&
-@@ -1886,6 +2014,7 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
- return NF_ACCEPT;
-
- eth = eth_hdr(skb);
-+
- memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
-
- /*not bind multicast if PPE mcast not enable*/
-@@ -1905,6 +2034,12 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
- switch ((int)bfib1_tx.pkt_type) {
- case IPV4_HNAPT:
- case IPV4_HNAT:
-+ /*
-+ * skip if packet is an encap tnl packet or it may
-+ * screw up inner mac header
-+ */
-+ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
-+ break;
- entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
- entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
- break;
-@@ -2066,6 +2201,10 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
- entry->ipv6_5t_route.iblk2.dp = gmac_no;
- }
-
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+ hnat_fill_offload_engine_entry(skb, entry, NULL);
-+#endif
-+
- bfib1_tx.ttl = 1;
- bfib1_tx.state = BIND;
- wmb();
-@@ -2087,6 +2226,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
- }
-
- skb_hnat_alg(skb) = 0;
-+ skb_hnat_set_tops(skb, 0);
- skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
-
- if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
-@@ -2535,6 +2675,7 @@ static unsigned int mtk_hnat_nf_post_rou
- struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
- .virt_dev = (struct net_device*)out };
- const struct net_device *arp_dev = out;
-+ bool is_virt_dev = false;
-
- if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
- return 0;
-@@ -2551,10 +2692,29 @@ static unsigned int mtk_hnat_nf_post_rou
-
- if (out->netdev_ops->ndo_flow_offload_check) {
- out->netdev_ops->ndo_flow_offload_check(&hw_path);
-+
- out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
-+ if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL && mtk_tnl_encap_offload) {
-+ if (ntohs(skb->protocol) == ETH_P_IP
-+ && ip_hdr(skb)->protocol == IPPROTO_TCP) {
-+ skb_hnat_set_tops(skb, hw_path.tnl_type + 1);
-+ } else {
-+ /*
-+ * we are not support protocols other than IPv4 TCP
-+ * for tunnel protocol offload yet
-+ */
-+ skb_hnat_alg(skb) = 1;
-+ return 0;
-+ }
-+ }
- }
-
- if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
-+ is_virt_dev = true;
-+
-+ if (is_virt_dev
-+ && !(skb_hnat_tops(skb) && skb_hnat_is_encap(skb)
-+ && (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)))
- return 0;
-
- trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
-@@ -2574,9 +2734,18 @@ static unsigned int mtk_hnat_nf_post_rou
- if (fn && !mtk_hnat_accel_type(skb))
- break;
-
-- if (fn && fn(skb, arp_dev, &hw_path))
-+ if (!is_virt_dev && fn && fn(skb, arp_dev, &hw_path))
- break;
-
-+ /* skb_hnat_tops(skb) is updated in mtk_tnl_offload() */
-+ if (skb_hnat_tops(skb)) {
-+ if (skb_hnat_is_encap(skb) && !is_virt_dev
-+ && mtk_tnl_encap_offload && mtk_tnl_encap_offload(skb))
-+ break;
-+ if (skb_hnat_is_decap(skb))
-+ break;
-+ }
-+
- skb_to_hnat_info(skb, out, entry, &hw_path);
- break;
- case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
-@@ -2847,7 +3016,7 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
- if (iph->protocol == IPPROTO_IPV6) {
- entry->udib1.pkt_type = IPV6_6RD;
- hnat_set_head_frags(state, skb, 0, hnat_set_alg);
-- } else {
-+ } else if (!skb_hnat_tops(skb)) {
- hnat_set_head_frags(state, skb, 1, hnat_set_alg);
- }
-
---- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
-@@ -44,7 +44,9 @@ struct hnat_desc {
- u32 is_sp : 1;
- u32 hf : 1;
- u32 amsdu : 1;
-- u32 resv3 : 19;
-+ u32 tops : 6;
-+ u32 is_decap : 1;
-+ u32 resv3 : 12;
- u32 magic_tag_protect : 16;
- } __packed;
- #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
-@@ -91,6 +93,19 @@ struct hnat_desc {
- ((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
-
- #define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+#define skb_hnat_tops(skb) (((struct hnat_desc *)((skb)->head))->tops)
-+#define skb_hnat_is_decap(skb) (((struct hnat_desc *)((skb)->head))->is_decap)
-+#define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
-+#define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
-+#define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
-+#else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
-+#define skb_hnat_tops(skb) (0)
-+#define skb_hnat_is_decap(skb) (0)
-+#define skb_hnat_is_encap(skb) (0)
-+#define skb_hnat_set_tops(skb, tops)
-+#define skb_hnat_set_is_decap(skb, is_decap)
-+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
- #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
- #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
- #define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
---- a/include/net/netfilter/nf_flow_table.h
-+++ b/include/net/netfilter/nf_flow_table.h
-@@ -98,10 +98,22 @@ struct flow_offload {
- #define FLOW_OFFLOAD_PATH_6RD BIT(5)
- #define FLOW_OFFLOAD_PATH_TNL BIT(6)
-
-+enum flow_offload_tnl {
-+ FLOW_OFFLOAD_TNL_GRETAP,
-+ FLOW_OFFLOAD_TNL_PPTP,
-+ FLOW_OFFLOAD_TNL_IP_L2TP,
-+ FLOW_OFFLOAD_TNL_UDP_L2TP_CTRL,
-+ FLOW_OFFLOAD_TNL_UDP_L2TP_DATA,
-+ FLOW_OFFLOAD_VXLAN,
-+ FLOW_OFFLOAD_NATT,
-+ __FLOW_OFFLOAD_MAX,
-+};
-+
- struct flow_offload_hw_path {
- struct net_device *dev;
- struct net_device *virt_dev;
- u32 flags;
-+ u32 tnl_type;
-
- u8 eth_src[ETH_ALEN];
- u8 eth_dest[ETH_ALEN];
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -1874,6 +1874,9 @@ extern const struct of_device_id of_mtk_
- extern u32 mtk_hwlro_stats_ebl;
- extern u32 dbg_show_level;
-
-+/* tunnel offload related */
-+extern struct net_device *(*mtk_get_tnl_dev)(int tnl_idx);
-+
- /* read the hardware status register */
- void mtk_stats_update_mac(struct mtk_mac *mac);
-
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -245,6 +245,9 @@ static const char * const mtk_clks_source_name[] = {
+ "top_netsys_warp_sel",
+ };
+
++struct net_device *(*mtk_get_tnl_dev)(int tnl_idx) = NULL;
++EXPORT_SYMBOL(mtk_get_tnl_dev);
++
+ void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+ {
+ __raw_writel(val, eth->base + reg);
+@@ -2186,6 +2189,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ u64 addr64 = 0;
+ u8 *data, *new_data;
+ struct mtk_rx_dma_v2 *rxd, trxd;
++ int tnl_idx = 0;
+ int done = 0;
+
+ if (unlikely(!ring))
+@@ -2229,11 +2233,20 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
+ }
+
+- if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+- !eth->netdev[mac]))
+- goto release_desc;
++ tnl_idx = RX_DMA_GET_TOPS_CRSN(trxd.rxd6);
++ if (mtk_get_tnl_dev && tnl_idx) {
++ netdev = mtk_get_tnl_dev(tnl_idx);
++ if (unlikely(IS_ERR(netdev)))
++ netdev = NULL;
++ }
+
+- netdev = eth->netdev[mac];
++ if (!netdev) {
++ if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
++ !eth->netdev[mac]))
++ goto release_desc;
++
++ netdev = eth->netdev[mac];
++ }
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto release_desc;
+@@ -2318,6 +2331,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ skb_hnat_alg(skb) = 0;
+ skb_hnat_filled(skb) = 0;
+ skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
++ skb_hnat_set_tops(skb, 0);
++ skb_hnat_set_is_decap(skb, 0);
+
+ if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
+ trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+@@ -43,6 +43,12 @@ void (*ppe_dev_register_hook)(struct net_device *dev) = NULL;
+ EXPORT_SYMBOL(ppe_dev_register_hook);
+ void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
+ EXPORT_SYMBOL(ppe_dev_unregister_hook);
++int (*mtk_tnl_encap_offload)(struct sk_buff *skb) = NULL;
++EXPORT_SYMBOL(mtk_tnl_encap_offload);
++int (*mtk_tnl_decap_offload)(struct sk_buff *skb) = NULL;
++EXPORT_SYMBOL(mtk_tnl_decap_offload);
++bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
++EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
+
+ static void hnat_sma_build_entry(struct timer_list *t)
+ {
+@@ -53,6 +59,16 @@ static void hnat_sma_build_entry(struct timer_list *t)
+ SMA, SMA_FWD_CPU_BUILD_ENTRY);
+ }
+
++struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index)
++{
++ if (index == 0x7fff || index >= hnat_priv->foe_etry_num
++ || ppe_id >= CFG_PPE_NUM)
++ return ERR_PTR(-EINVAL);
++
++ return &hnat_priv->foe_table_cpu[ppe_id][index];
++}
++EXPORT_SYMBOL(hnat_get_foe_entry);
++
+ void hnat_cache_ebl(int enable)
+ {
+ int i;
+@@ -63,6 +79,7 @@ void hnat_cache_ebl(int enable)
+ cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
+ }
+ }
++EXPORT_SYMBOL(hnat_cache_ebl);
+
+ static void hnat_reset_timestamp(struct timer_list *t)
+ {
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+@@ -1133,6 +1133,8 @@ enum FoeIpAct {
+ #define NR_WDMA1_PORT 9
+ #define NR_WDMA2_PORT 13
+ #define NR_GMAC3_PORT 15
++#define NR_TDMA_TPORT 4
++#define NR_TDMA_QDMA_TPORT 5
+ #define LAN_DEV_NAME hnat_priv->lan
+ #define LAN2_DEV_NAME hnat_priv->lan2
+ #define IS_WAN(dev) \
+@@ -1256,6 +1258,8 @@ static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
+ }
+ #endif
+
++struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index);
++
+ void hnat_deinit_debugfs(struct mtk_hnat *h);
+ int hnat_init_debugfs(struct mtk_hnat *h);
+ int hnat_register_nf_hooks(void);
+@@ -1272,6 +1276,9 @@ extern int qos_ul_toggle;
+ extern int hook_toggle;
+ extern int mape_toggle;
+ extern int qos_toggle;
++extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
++extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
++extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
+
+ int ext_if_add(struct extdev_entry *ext_entry);
+ int ext_if_del(struct extdev_entry *ext_entry);
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+@@ -726,10 +726,14 @@ static unsigned int is_ppe_support_type(struct sk_buff *skb)
+ case ETH_P_IP:
+ iph = ip_hdr(skb);
+
+- /* do not accelerate non tcp/udp traffic */
+- if ((iph->protocol == IPPROTO_TCP) ||
++ if (mtk_tnl_decap_offloadable && mtk_tnl_decap_offloadable(skb)) {
++ /* tunnel protocol is offloadable */
++ skb_hnat_set_is_decap(skb, 1);
++ return 1;
++ } else if ((iph->protocol == IPPROTO_TCP) ||
+ (iph->protocol == IPPROTO_UDP) ||
+ (iph->protocol == IPPROTO_IPV6)) {
++ /* do not accelerate non tcp/udp traffic */
+ return 1;
+ }
+
+@@ -846,6 +850,13 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
+
+ hnat_set_head_frags(state, skb, -1, hnat_set_iif);
+
++ if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
++ && is_magic_tag_valid(skb)
++ && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
++ && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
++ return NF_ACCEPT;
++ }
++
+ /*
+ * Avoid mistakenly binding of outer IP, ports in SW L2TP decap flow.
+ * In pre-routing, if dev is virtual iface, TOPS module is not loaded,
+@@ -921,6 +932,13 @@ mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
+
+ hnat_set_head_frags(state, skb, -1, hnat_set_iif);
+
++ if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
++ && is_magic_tag_valid(skb)
++ && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
++ && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
++ return NF_ACCEPT;
++ }
++
+ pre_routing_print(skb, state->in, state->out, __func__);
+
+ if (unlikely(debug_level >= 7)) {
+@@ -1073,9 +1091,22 @@ static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
+ return -1;
+ }
+
++ /*
++ * if this packet is a tunnel packet and is about to construct
++ * outer header, we must update its outer mac header pointer
++ * before filling outer mac or it may screw up inner mac
++ */
++ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
++ skb_push(skb, sizeof(struct ethhdr));
++ skb_reset_mac_header(skb);
++ }
++
+ memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
+ memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
+
++ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
++ skb_pull(skb, sizeof(struct ethhdr));
++
+ rcu_read_unlock_bh();
+
+ return 0;
+@@ -1201,6 +1232,81 @@ static struct ethhdr *get_ipv6_ipip_ethhdr(struct sk_buff *skb,
+ return eth;
+ }
+
++static inline void hnat_get_filled_unbind_entry(struct sk_buff *skb,
++ struct foe_entry *entry)
++{
++ if (unlikely(!skb || !entry))
++ return;
++
++ memcpy(entry,
++ &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
++ sizeof(*entry));
++
++#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
++ entry->bfib1.mc = 0;
++#endif /* defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3) */
++ entry->bfib1.ka = 0;
++ entry->bfib1.vlan_layer = 0;
++ entry->bfib1.psn = 0;
++ entry->bfib1.vpm = 0;
++ entry->bfib1.ps = 0;
++}
++
++static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
++{
++ u32 cfg;
++ u32 max_man = 0;
++ u32 max_exp = 0;
++ const struct mtk_mac *mac;
++
++ if (!dev)
++ return;
++ mac = netdev_priv(dev);
++
++ switch (mac->speed) {
++ case SPEED_100:
++ case SPEED_1000:
++ case SPEED_2500:
++ case SPEED_5000:
++ case SPEED_10000:
++ max_man = mac->speed / SPEED_100;
++ max_exp = 5;
++ break;
++ default:
++ return;
++ }
++
++ cfg = QTX_SCH_MIN_RATE_EN | QTX_SCH_MAX_RATE_EN;
++ cfg |= (1 << QTX_SCH_MIN_RATE_MAN_OFFSET) |
++ (4 << QTX_SCH_MIN_RATE_EXP_OFFSET) |
++ (max_man << QTX_SCH_MAX_RATE_MAN_OFFSET) |
++ (max_exp << QTX_SCH_MAX_RATE_EXP_OFFSET) |
++ (4 << QTX_SCH_MAX_RATE_WGHT_OFFSET);
++ writel(cfg, hnat_priv->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
++}
++
++static inline void hnat_fill_offload_engine_entry(struct sk_buff *skb,
++ struct foe_entry *entry,
++ const struct net_device *dev)
++{
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
++ /*
++ * if skb_hnat_tops(skb) is setup for encapsulation,
++ * we fill in hnat tport and tops_entry for tunnel encapsulation
++ * offloading
++ */
++ entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
++ entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
++ } else {
++ return;
++ }
++
++ entry->ipv4_hnapt.iblk2.qid = 12; /* offload engine use QID 12 */
++ hnat_qos_tnl(12, dev); /* set rate limit to line rate */
++#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
++}
++
+ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
+ const struct net_device *dev,
+ struct foe_entry *foe,
+@@ -1237,6 +1343,11 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
+ if (whnat && is_hnat_pre_filled(foe))
+ return 0;
+
++ if (skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)) {
++ hnat_get_filled_unbind_entry(skb, &entry);
++ goto hnat_entry_bind;
++ }
++
+ entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
+ entry.bfib1.state = foe->udib1.state;
+
+@@ -1679,6 +1790,10 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
+ /* Fill Layer2 Info.*/
+ entry = ppe_fill_L2_info(eth, entry, hw_path);
+
++ if (skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
++ goto hnat_entry_skip_bind;
++
++hnat_entry_bind:
+ /* Fill Info Blk*/
+ entry = ppe_fill_info_blk(eth, entry, hw_path);
+
+@@ -1879,7 +1994,20 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
+ entry.ipv6_5t_route.act_dp |= UDF_HNAT_PRE_FILLED;
+ }
+
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++ hnat_fill_offload_engine_entry(skb, &entry, dev);
++#endif
++
++hnat_entry_skip_bind:
+ wmb();
++
++ /*
++ * final check before we write BIND info.
++ * If this entry is already bound, we should not modify it right now
++ */
++ if (entry_hnat_is_bound(foe))
++ return 0;
++
+ memcpy(foe, &entry, sizeof(entry));
+ /*reset statistic for this entry*/
+ if (hnat_priv->data->per_flow_accounting &&
+@@ -1951,6 +2079,12 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
+ switch ((int)entry.bfib1.pkt_type) {
+ case IPV4_HNAPT:
+ case IPV4_HNAT:
++ /*
++ * skip if packet is an encap tnl packet or it may
++ * screw up inner mac header
++ */
++ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
++ break;
+ entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+ entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
+ break;
+@@ -2112,6 +2246,10 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
+ entry.ipv6_5t_route.iblk2.dp = gmac_no;
+ }
+
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++ hnat_fill_offload_engine_entry(skb, &entry, NULL);
++#endif
++
+ entry.bfib1.ttl = 1;
+ entry.bfib1.state = BIND;
+
+@@ -2187,6 +2325,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *skb)
+ }
+
+ skb_hnat_alg(skb) = 0;
++ skb_hnat_set_tops(skb, 0);
+ skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+
+ if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
+@@ -2636,6 +2775,7 @@ static unsigned int mtk_hnat_nf_post_routing(
+ struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
+ .virt_dev = (struct net_device*)out };
+ const struct net_device *arp_dev = out;
++ bool is_virt_dev = false;
+
+ if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
+ return 0;
+@@ -2652,10 +2792,29 @@ static unsigned int mtk_hnat_nf_post_routing(
+
+ if (out->netdev_ops->ndo_flow_offload_check) {
+ out->netdev_ops->ndo_flow_offload_check(&hw_path);
++
+ out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
++ if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL && mtk_tnl_encap_offload) {
++ if (ntohs(skb->protocol) == ETH_P_IP
++ && ip_hdr(skb)->protocol == IPPROTO_TCP) {
++ skb_hnat_set_tops(skb, hw_path.tnl_type + 1);
++ } else {
++ /*
++ * we are not support protocols other than IPv4 TCP
++ * for tunnel protocol offload yet
++ */
++ skb_hnat_alg(skb) = 1;
++ return 0;
++ }
++ }
+ }
+
+ if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
++ is_virt_dev = true;
++
++ if (is_virt_dev
++ && !(skb_hnat_tops(skb) && skb_hnat_is_encap(skb)
++ && (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)))
+ return 0;
+
+ trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
+@@ -2675,9 +2834,18 @@ static unsigned int mtk_hnat_nf_post_routing(
+ if (fn && !mtk_hnat_accel_type(skb))
+ break;
+
+- if (fn && fn(skb, arp_dev, &hw_path))
++ if (!is_virt_dev && fn && fn(skb, arp_dev, &hw_path))
+ break;
+
++ /* skb_hnat_tops(skb) is updated in mtk_tnl_offload() */
++ if (skb_hnat_tops(skb)) {
++ if (skb_hnat_is_encap(skb) && !is_virt_dev
++ && mtk_tnl_encap_offload && mtk_tnl_encap_offload(skb))
++ break;
++ if (skb_hnat_is_decap(skb))
++ break;
++ }
++
+ spin_lock(&hnat_priv->entry_lock);
+ skb_to_hnat_info(skb, out, entry, &hw_path);
+ spin_unlock(&hnat_priv->entry_lock);
+@@ -2951,7 +3119,7 @@ mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
+ if (iph->protocol == IPPROTO_IPV6) {
+ entry->udib1.pkt_type = IPV6_6RD;
+ hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+- } else {
++ } else if (!skb_hnat_tops(skb)) {
+ hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+ }
+
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+@@ -44,7 +44,9 @@ struct hnat_desc {
+ u32 is_sp : 1;
+ u32 hf : 1;
+ u32 amsdu : 1;
+- u32 resv3 : 19;
++ u32 tops : 6;
++ u32 is_decap : 1;
++ u32 resv3 : 12;
+ u32 magic_tag_protect : 16;
+ } __packed;
+ #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
+@@ -91,6 +93,19 @@ struct hnat_desc {
+ ((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
+
+ #define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++#define skb_hnat_tops(skb) (((struct hnat_desc *)((skb)->head))->tops)
++#define skb_hnat_is_decap(skb) (((struct hnat_desc *)((skb)->head))->is_decap)
++#define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
++#define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
++#define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
++#else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
++#define skb_hnat_tops(skb) (0)
++#define skb_hnat_is_decap(skb) (0)
++#define skb_hnat_is_encap(skb) (0)
++#define skb_hnat_set_tops(skb, tops)
++#define skb_hnat_set_is_decap(skb, is_decap)
++#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
+ #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
+ #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
+ #define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -98,10 +98,22 @@ struct flow_offload {
+ #define FLOW_OFFLOAD_PATH_6RD BIT(5)
+ #define FLOW_OFFLOAD_PATH_TNL BIT(6)
+
++enum flow_offload_tnl {
++ FLOW_OFFLOAD_TNL_GRETAP,
++ FLOW_OFFLOAD_TNL_PPTP,
++ FLOW_OFFLOAD_TNL_IP_L2TP,
++ FLOW_OFFLOAD_TNL_UDP_L2TP_CTRL,
++ FLOW_OFFLOAD_TNL_UDP_L2TP_DATA,
++ FLOW_OFFLOAD_VXLAN,
++ FLOW_OFFLOAD_NATT,
++ __FLOW_OFFLOAD_MAX,
++};
++
+ struct flow_offload_hw_path {
+ struct net_device *dev;
+ struct net_device *virt_dev;
+ u32 flags;
++ u32 tnl_type;
+
+ u8 eth_src[ETH_ALEN];
+ u8 eth_dest[ETH_ALEN];
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1904,6 +1904,9 @@ extern const struct of_device_id of_mtk_match[];
+ extern u32 mtk_hwlro_stats_ebl;
+ extern u32 dbg_show_level;
+
++/* tunnel offload related */
++extern struct net_device *(*mtk_get_tnl_dev)(int tnl_idx);
++
+ /* read the hardware status register */
+ void mtk_stats_update_mac(struct mtk_mac *mac);
+