[][openwrt][mt7988][tops][change maintenance for eth and hnat from patch way to files-5.4]

[Description]
Change the maintenance of eth and hnat from the patch-based approach to files-5.4

[Release-log]
N/A

Change-Id: Ib6858b1286bbd0247564ca6b0c70853eee5a6d98
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/9597187
diff --git a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c
index 48dda83..e4fa933 100644
--- a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c
+++ b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.c
@@ -989,6 +989,9 @@
 	struct mtk_eth *eth = container_of(n, struct mtk_eth, netdevice_notifier);
 
 	switch (event) {
+	case MTK_TOPS_DUMP_DONE:
+		complete(&wait_tops_done);
+		break;
 	case MTK_WIFI_RESET_DONE:
 	case MTK_FE_STOP_TRAFFIC_DONE:
 		pr_info("%s rcv done event:%lx\n", __func__, event);
diff --git a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.h b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.h
index cab6ab0..a609d01 100644
--- a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.h
+++ b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_reset.h
@@ -13,6 +13,7 @@
 #define MTK_WIFI_RESET_DONE	0x2002
 #define MTK_WIFI_CHIP_ONLINE 	0x2003
 #define MTK_WIFI_CHIP_OFFLINE 	0x2004
+#define MTK_TOPS_DUMP_DONE	0x3001
 #define MTK_FE_RESET_NAT_DONE	0x4001
 
 #define MTK_FE_STOP_TRAFFIC	(0x2005)
@@ -72,6 +73,7 @@
 
 int mtk_eth_netdevice_event(struct notifier_block *n, unsigned long event, void *ptr);
 extern struct completion wait_ser_done;
+extern struct completion wait_tops_done;
 extern char* mtk_reset_event_name[32];
 extern atomic_t reset_lock;
 extern struct completion wait_nat_done;
diff --git a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 0410648..b61bfb2 100644
--- a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -39,6 +39,7 @@
 module_param_named(msg_level, mtk_msg_level, int, 0);
 MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
 DECLARE_COMPLETION(wait_ser_done);
+DECLARE_COMPLETION(wait_tops_done);
 
 #define MTK_ETHTOOL_STAT(x) { #x, \
 			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
@@ -290,6 +291,13 @@
 	"top_netsys_warp_sel", "top_macsec_sel",
 };
 
+u32 (*mtk_get_tnl_netsys_params)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_get_tnl_netsys_params);
+struct net_device *(*mtk_get_tnl_dev)(u8 tops_crsn) = NULL;
+EXPORT_SYMBOL(mtk_get_tnl_dev);
+void (*mtk_set_tops_crsn)(struct sk_buff *skb, u8 tops_crsn) = NULL;
+EXPORT_SYMBOL(mtk_set_tops_crsn);
+
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
 {
 	__raw_writel(val, eth->base + reg);
@@ -2025,6 +2033,10 @@
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma_v2 *desc = txd;
 	u32 data = 0;
+	u32 params;
+	u8 tops_entry  = 0;
+	u8 tport = 0;
+	u8 cdrt = 0;
 
 	WRITE_ONCE(desc->txd1, info->addr);
 
@@ -2050,6 +2062,36 @@
 		trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
 			     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
 
+#endif
+
+#if IS_ENABLED(CONFIG_MEDIATEK_NETSYS_V3)
+	if (mtk_get_tnl_netsys_params && skb && !(skb->inner_protocol == IPPROTO_ESP)) {
+		params = mtk_get_tnl_netsys_params(skb);
+		tops_entry = params & 0x000000FF;
+		tport = (params & 0x0000FF00) >> 8;
+		cdrt = (params & 0x00FF0000) >> 16;
+	}
+
+	/* forward to EIP197 if this packet is going to be encrypted */
+#if IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT) || IS_ENABLED(CONFIG_NET_MEDIATEK_HNAT_MODULE)
+	else if (unlikely(skb->inner_protocol == IPPROTO_ESP &&
+		 skb_hnat_cdrt(skb) && is_magic_tag_valid(skb))) {
+		/* carry cdrt index for encryption */
+		cdrt = skb_hnat_cdrt(skb);
+		skb_hnat_magic_tag(skb) = 0;
+#else
+	else if (unlikely(skb->inner_protocol == IPPROTO_ESP &&
+		 skb_tnl_cdrt(skb) && is_tnl_tag_valid(skb))) {
+		cdrt = skb_tnl_cdrt(skb);
+		skb_tnl_magic_tag(skb) = 0;
+#endif
+		tport = EIP197_QDMA_TPORT;
+	}
+
+	if (tport) {
+		data &= ~(TX_DMA_TPORT_MASK << TX_DMA_TPORT_SHIFT);
+		data |= (tport & TX_DMA_TPORT_MASK) << TX_DMA_TPORT_SHIFT;
+	}
 #endif
 	WRITE_ONCE(desc->txd4, data);
 
@@ -2072,7 +2114,20 @@
 	WRITE_ONCE(desc->txd6, data);
 
 	WRITE_ONCE(desc->txd7, 0);
-	WRITE_ONCE(desc->txd8, 0);
+
+	data = 0;
+
+	if (tops_entry) {
+		data &= ~(TX_DMA_TOPS_ENTRY_MASK << TX_DMA_TOPS_ENTRY_SHIFT);
+		data |= (tops_entry & TX_DMA_TOPS_ENTRY_MASK) << TX_DMA_TOPS_ENTRY_SHIFT;
+	}
+
+	if (cdrt) {
+		data &= ~(TX_DMA_CDRT_MASK << TX_DMA_CDRT_SHIFT);
+		data |= (cdrt & TX_DMA_CDRT_MASK) << TX_DMA_CDRT_SHIFT;
+	}
+
+	WRITE_ONCE(desc->txd8, data);
 }
 
 static void mtk_tx_set_pdma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
@@ -2444,6 +2499,7 @@
 	struct mtk_rx_ring *ring = rx_napi->rx_ring;
 	int idx;
 	struct sk_buff *skb;
+	u8 tops_crsn = 0;
 	u8 *data, *new_data;
 	struct mtk_rx_dma_v2 *rxd, trxd;
 	int done = 0;
@@ -2484,11 +2540,20 @@
 				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
 		}
 
-		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-			     !eth->netdev[mac]))
-			goto release_desc;
+		tops_crsn = RX_DMA_GET_TOPS_CRSN(trxd.rxd6);
+		if (mtk_get_tnl_dev && tops_crsn) {
+			netdev = mtk_get_tnl_dev(tops_crsn);
+			if (IS_ERR(netdev))
+				netdev = NULL;
+		}
 
-		netdev = eth->netdev[mac];
+		if (!netdev) {
+			if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+				     !eth->netdev[mac]))
+				goto release_desc;
+
+			netdev = eth->netdev[mac];
+		}
 
 		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
 			goto release_desc;
@@ -2575,7 +2640,11 @@
 
 		skb_hnat_alg(skb) = 0;
 		skb_hnat_filled(skb) = 0;
+		skb_hnat_set_cdrt(skb, RX_DMA_GET_CDRT(trxd.rxd7));
 		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+		skb_hnat_set_tops(skb, 0);
+		skb_hnat_set_is_decap(skb, 0);
+		skb_hnat_set_is_decrypt(skb, (skb_hnat_cdrt(skb) ? 1 : 0));
 
 		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
 			if (eth_debug_level >= 7)
@@ -2589,6 +2658,9 @@
 				     __func__, skb_hnat_entry(skb), skb_hnat_sport(skb),
 				     skb_hnat_reason(skb), skb_hnat_alg(skb));
 #endif
+		if (mtk_set_tops_crsn && skb && tops_crsn)
+			mtk_set_tops_crsn(skb, tops_crsn);
+
 		if (mtk_hwlro_stats_ebl &&
 		    IS_HW_LRO_RING(ring->ring_no) && eth->hwlro) {
 			hw_lro_stats_update(ring->ring_no, &trxd);
@@ -4859,6 +4931,8 @@
 			}
 			pr_warn("wait for MTK_FE_START_RESET\n");
 		}
+		if (!try_wait_for_completion(&wait_tops_done))
+			pr_warn("wait for MTK_TOPS_DUMP_DONE\n");
 		rtnl_lock();
 		break;
 	}
diff --git a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 3f92089..15bb8dd 100644
--- a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -580,6 +580,12 @@
 #define MTK_QDMA_GMAC2_QID	8
 #define MTK_QDMA_GMAC3_QID	6
 
+/* QDMA V2 descriptor txd8 */
+#define TX_DMA_CDRT_SHIFT          0
+#define TX_DMA_CDRT_MASK           0xff
+#define TX_DMA_TOPS_ENTRY_SHIFT    8
+#define TX_DMA_TOPS_ENTRY_MASK     0x3f
+
 /* QDMA V2 descriptor txd6 */
 #define TX_DMA_INS_VLAN_V2         BIT(16)
 
@@ -589,6 +595,9 @@
 #define TX_DMA_SPTAG_V3            BIT(27)
 
 /* QDMA V2 descriptor txd4 */
+#define EIP197_QDMA_TPORT          3
+#define TX_DMA_TPORT_SHIFT         0
+#define TX_DMA_TPORT_MASK          0xf
 #define TX_DMA_FPORT_SHIFT_V2      8
 #define TX_DMA_FPORT_MASK_V2       0xf
 #define TX_DMA_SWC_V2              BIT(30)
@@ -702,6 +711,9 @@
 #define RX_DMA_GET_AGG_CNT_V2(_x)	(((_x) >> 16) & 0xff)
 #define RX_DMA_GET_TOPS_CRSN(_x)	(((_x) >> 24) & 0xff)
 
+/* PDMA V2 descriptor rxd7 */
+#define RX_DMA_GET_CDRT(_x)		(((_x) >> 8) & 0xff)
+
 /* PHY Polling and SMI Master Control registers */
 #define MTK_PPSC		0x10000
 #define PPSC_MDC_CFG		GENMASK(29, 24)
@@ -1089,6 +1101,48 @@
 #define MT7628_SDM_MAC_ADRL	(MT7628_SDM_OFFSET + 0x0c)
 #define MT7628_SDM_MAC_ADRH	(MT7628_SDM_OFFSET + 0x10)
 
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+#if !defined(CONFIG_NET_MEDIATEK_HNAT) && !defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
+struct tnl_desc {
+	u32 entry : 15;
+	u32 filled : 3;
+	u32 crsn : 5;
+	u32 resv1 : 3;
+	u32 sport : 4;
+	u32 resv2 : 1;
+	u32 alg : 1;
+	u32 iface : 8;
+	u32 wdmaid : 2;
+	u32 rxid : 2;
+	u32 wcid : 16;
+	u32 bssid : 8;
+	u32 usr_info : 16;
+	u32 tid : 4;
+	u32 is_fixedrate : 1;
+	u32 is_prior : 1;
+	u32 is_sp : 1;
+	u32 hf : 1;
+	u32 amsdu : 1;
+	u32 tops : 6;
+	u32 is_decap : 1;
+	u32 cdrt : 8;
+	u32 resv3 : 4;
+	u32 magic_tag_protect : 16;
+} __packed;
+
+#define TNL_MAGIC_TAG 0x6789
+#define skb_tnl_cdrt(skb) (((struct tnl_desc *)((skb)->head))->cdrt)
+#define skb_tnl_set_cdrt(skb, cdrt) ((skb_tnl_cdrt(skb)) = (cdrt))
+#define skb_tnl_magic_tag(skb) (((struct tnl_desc *)((skb)->head))->magic_tag_protect)
+#define is_tnl_tag_valid(skb) (skb_tnl_magic_tag(skb) == TNL_MAGIC_TAG)
+#else /* defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE) */
+#define skb_tnl_cdrt(skb) (0)
+#define skb_tnl_set_cdrt(skb, cdrt) (0)
+#define skb_tnl_magic_tag(skb) (0)
+#define is_tnl_tag_valid(skb) (0)
+#endif /* !defined(CONFIG_NET_MEDIATEK_HNAT) && !defined(CONFIG_NET_MEDIATEK_HNAT_MODULE) */
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
+
 struct mtk_rx_dma {
 	unsigned int rxd1;
 	unsigned int rxd2;
@@ -2102,6 +2156,11 @@
 extern u32 mtk_hwlro_stats_ebl;
 extern u32 dbg_show_level;
 
+/* tunnel offload related */
+extern u32 (*mtk_get_tnl_netsys_params)(struct sk_buff *skb);
+extern struct net_device *(*mtk_get_tnl_dev)(u8 tops_crsn);
+extern void (*mtk_set_tops_crsn)(struct sk_buff *skb, u8 tops_crsn);
+
 /* read the hardware status register */
 void mtk_stats_update_mac(struct mtk_mac *mac);
 
diff --git a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
index d4f74c1..7108152 100644
--- a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+++ b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -52,6 +52,14 @@
 EXPORT_SYMBOL(ppe_dev_register_hook);
 void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
 EXPORT_SYMBOL(ppe_dev_unregister_hook);
+int (*mtk_tnl_encap_offload)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_encap_offload);
+int (*mtk_tnl_decap_offload)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_decap_offload);
+bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
+bool (*mtk_crypto_offloadable)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_crypto_offloadable);
 
 int (*hnat_set_wdma_pse_port_state)(u32 wdma_idx, bool up) = NULL;
 EXPORT_SYMBOL(hnat_set_wdma_pse_port_state);
@@ -65,6 +73,16 @@
 			     SMA, SMA_FWD_CPU_BUILD_ENTRY);
 }
 
+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index)
+{
+	if (index == 0x7fff || index >= hnat_priv->foe_etry_num ||
+	    ppe_id >= CFG_PPE_NUM)
+		return ERR_PTR(-EINVAL);
+
+	return &hnat_priv->foe_table_cpu[ppe_id][index];
+}
+EXPORT_SYMBOL(hnat_get_foe_entry);
+
 void hnat_cache_ebl(int enable)
 {
 	int i;
@@ -75,6 +93,7 @@
 		cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
 	}
 }
+EXPORT_SYMBOL(hnat_cache_ebl);
 
 static void hnat_reset_timestamp(struct timer_list *t)
 {
diff --git a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
index f1bb6e9..ad23b8d 100644
--- a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+++ b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -1155,6 +1155,12 @@
 #define NR_WDMA2_PORT 13
 #define NR_GMAC3_PORT 15
 #define NR_QDMA_TPORT 1
+#define NR_EIP197_TPORT 2
+#define NR_EIP197_QDMA_TPORT 3
+#define NR_TDMA_TPORT 4
+#define NR_TDMA_QDMA_TPORT 5
+#define NR_TDMA_EIP197_TPORT 8
+#define NR_TDMA_EIP197_QDMA_TPORT 9
 #define WAN_DEV_NAME hnat_priv->wan
 #define LAN_DEV_NAME hnat_priv->lan
 #define LAN2_DEV_NAME hnat_priv->lan2
@@ -1307,6 +1313,8 @@
 #endif
 }
 
+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index);
+
 void hnat_deinit_debugfs(struct mtk_hnat *h);
 int hnat_init_debugfs(struct mtk_hnat *h);
 int hnat_register_nf_hooks(void);
@@ -1323,7 +1331,14 @@
 extern int hook_toggle;
 extern int mape_toggle;
 extern int qos_toggle;
-
+extern int tnl_toggle;
+extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
+extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
+extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
+extern bool (*mtk_crypto_offloadable)(struct sk_buff *skb);
+extern int hnat_bind_crypto_entry(struct sk_buff *skb,
+				  const struct net_device *dev,
+				  int fill_inner_info);
 int ext_if_add(struct extdev_entry *ext_entry);
 int ext_if_del(struct extdev_entry *ext_entry);
 void cr_set_field(void __iomem *reg, u32 field, u32 val);
diff --git a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
index 53f4d10..29ea69b 100644
--- a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
+++ b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_debugfs.c
@@ -36,6 +36,7 @@
 int qos_toggle;
 int qos_dl_toggle = 1;
 int qos_ul_toggle = 1;
+int tnl_toggle;
 int xlat_toggle;
 struct hnat_desc headroom[DEF_ETRY_NUM];
 unsigned int dbg_cpu_reason_cnt[MAX_CRSN_NUM];
@@ -2687,6 +2688,47 @@
 	.release = single_release,
 };
 
+static int hnat_tnl_toggle_read(struct seq_file *m, void *private)
+{
+	pr_info("value=%d, tnl is %s now!\n",
+		tnl_toggle, (tnl_toggle) ? "enabled" : "disabled");
+
+	return 0;
+}
+
+static int hnat_tnl_toggle_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, hnat_tnl_toggle_read, file->private_data);
+}
+
+static ssize_t hnat_tnl_toggle_write(struct file *file,
+				     const char __user *buffer,
+				     size_t count, loff_t *data)
+{
+	char buf[8] = {0};
+	int len = count;
+
+	if ((len > 8) || copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (buf[0] == '1' && !tnl_toggle) {
+		pr_info("tnl is going to be enabled !\n");
+		tnl_toggle = 1;
+	} else if (buf[0] == '0' && tnl_toggle) {
+		pr_info("tnl is going to be disabled !\n");
+		tnl_toggle = 0;
+	}
+
+	return len;
+}
+
+static const struct file_operations hnat_tnl_toggle_fops = {
+	.open = hnat_tnl_toggle_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.write = hnat_tnl_toggle_write,
+	.release = single_release,
+};
 static int hnat_xlat_toggle_read(struct seq_file *m, void *private)
 {
 	pr_info("value=%d, xlat is %s now!\n",
@@ -3523,6 +3565,8 @@
 			    &hnat_ppd_if_fops);
 	debugfs_create_file("static_entry", 0444, root, h,
 			    &hnat_static_fops);
+	debugfs_create_file("tnl_toggle", 0444, root, h,
+			    &hnat_tnl_toggle_fops);
 	debugfs_create_file("xlat_toggle", 0444, root, h,
 			    &hnat_xlat_toggle_fops);
 	debugfs_create_file("xlat_cfg", 0444, root, h,
diff --git a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
index 9aa362c..18f514c 100644
--- a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+++ b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -750,10 +750,14 @@
 	case ETH_P_IP:
 		iph = ip_hdr(skb);
 
-		/* do not accelerate non tcp/udp traffic */
-		if ((iph->protocol == IPPROTO_TCP) ||
-		    (iph->protocol == IPPROTO_UDP) ||
-		    (iph->protocol == IPPROTO_IPV6)) {
+		if (mtk_tnl_decap_offloadable && mtk_tnl_decap_offloadable(skb)) {
+			/* tunnel protocol is offloadable */
+			skb_hnat_set_is_decap(skb, 1);
+			return 1;
+		} else if ((iph->protocol == IPPROTO_TCP) ||
+			   (iph->protocol == IPPROTO_UDP) ||
+			   (iph->protocol == IPPROTO_IPV6)) {
+			/* do not accelerate non tcp/udp traffic */
 			return 1;
 		}
 
@@ -850,6 +854,43 @@
 	return NF_DROP;
 }
 
+static inline void qos_rate_limit_set(u32 id, const struct net_device *dev)
+{
+	const struct mtk_mac *mac;
+	u32 max_man = SPEED_10000 / SPEED_100;
+	u32 max_exp = 5;
+	u32 cfg;
+
+	if (id > MTK_QDMA_TX_NUM)
+		return;
+
+	if (!dev)
+		goto setup_rate_limit;
+
+	mac = netdev_priv(dev);
+
+	switch (mac->speed) {
+	case SPEED_100:
+	case SPEED_1000:
+	case SPEED_2500:
+	case SPEED_5000:
+	case SPEED_10000:
+		max_man = mac->speed / SPEED_100;
+		break;
+	default:
+		return;
+	}
+
+setup_rate_limit:
+	cfg = QTX_SCH_MIN_RATE_EN | QTX_SCH_MAX_RATE_EN;
+	cfg |= (1 << QTX_SCH_MIN_RATE_MAN_OFFSET) |
+	       (4 << QTX_SCH_MIN_RATE_EXP_OFFSET) |
+	       (max_man << QTX_SCH_MAX_RATE_MAN_OFFSET) |
+	       (max_exp << QTX_SCH_MAX_RATE_EXP_OFFSET) |
+	       (4 << QTX_SCH_MAX_RATE_WGHT_OFFSET);
+	writel(cfg, hnat_priv->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+}
+
 static unsigned int
 mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
 			     const struct nf_hook_state *state)
@@ -872,13 +913,28 @@
 	hw_path.dev = skb->dev;
 	hw_path.virt_dev = skb->dev;
 
+	if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb) &&
+	    is_magic_tag_valid(skb) &&
+	    skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL &&
+	    mtk_tnl_decap_offload && !mtk_tnl_decap_offload(skb)) {
+		if (skb_hnat_cdrt(skb) && skb_hnat_is_decrypt(skb))
+			/*
+			 * the inbound flow of offload engines uses QID 13;
+			 * set its rate limit to maximum
+			 */
+			qos_rate_limit_set(13, NULL);
+
+		return NF_ACCEPT;
+	}
+
 	/*
 	 * Avoid mistakenly binding of outer IP, ports in SW L2TP decap flow.
 	 * In pre-routing, if dev is virtual iface, TOPS module is not loaded,
 	 * and it's L2TP flow, then do not bind.
 	 */
-	if (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
-	    && skb->dev->netdev_ops->ndo_flow_offload_check) {
+	if (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL &&
+	    skb->dev->netdev_ops->ndo_flow_offload_check &&
+	    !mtk_tnl_decap_offload) {
 		skb->dev->netdev_ops->ndo_flow_offload_check(&hw_path);
 
 		if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)
@@ -947,6 +1003,20 @@
 
 	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
 
+	if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb) &&
+	    is_magic_tag_valid(skb) &&
+	    skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL &&
+	    mtk_tnl_decap_offload && !mtk_tnl_decap_offload(skb)) {
+		if (skb_hnat_cdrt(skb) && skb_hnat_is_decrypt(skb))
+			/*
+			 * the inbound flow of offload engines uses QID 13;
+			 * set its rate limit to maximum
+			 */
+			qos_rate_limit_set(13, NULL);
+
+		return NF_ACCEPT;
+	}
+
 	pre_routing_print(skb, state->in, state->out, __func__);
 
 	if (unlikely(debug_level >= 7)) {
@@ -1086,6 +1156,9 @@
 		return 0;
 	}
 
+	if (!skb_hnat_cdrt(skb) && dst && dst_xfrm(dst))
+		return 0;
+
 	rcu_read_lock_bh();
 	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
@@ -1102,6 +1175,17 @@
 		return -1;
 	}
 
+	/*
+	 * if this packet is a tunnel packet and is about to construct
+	 * outer header, we must update its outer mac header pointer
+	 * before filling outer mac or it may screw up inner mac
+	 */
+	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) ||
+	    (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb))) {
+		skb_push(skb, sizeof(struct ethhdr));
+		skb_reset_mac_header(skb);
+	}
+
 	if (ip_hdr(skb)->protocol == IPPROTO_IPV6)
 		/* 6RD LAN->WAN(6to4) */
 		eth = (struct ethhdr *)(skb->data - ETH_HLEN);
@@ -1112,6 +1196,10 @@
 	memcpy(eth->h_source, out->dev_addr, ETH_ALEN);
 	eth->h_proto = htons(ETH_P_IP);
 
+	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) ||
+	    (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)))
+		skb_pull(skb, sizeof(struct ethhdr));
+
 	rcu_read_unlock_bh();
 
 	return 0;
@@ -1239,6 +1327,254 @@
 
 	return eth;
 }
+
+static inline void hnat_get_filled_unbind_entry(struct sk_buff *skb,
+						struct foe_entry *entry)
+{
+	if (unlikely(!skb || !entry))
+		return;
+
+	memcpy(entry,
+	       &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
+	       sizeof(*entry));
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+	entry->bfib1.mc = 0;
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3) */
+	entry->bfib1.ka = 0;
+	entry->bfib1.vlan_layer = 0;
+	entry->bfib1.psn = 0;
+	entry->bfib1.vpm = 0;
+	entry->bfib1.ps = 0;
+}
+
+/*
+ * check offload engine data is prepared
+ * return 0 for packets not related to offload engine
+ * return positive value for offload engine prepared data done
+ * return negative value for data is still constructing
+ */
+static inline int hnat_offload_engine_done(struct sk_buff *skb,
+					   struct flow_offload_hw_path *hw_path)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	if ((skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL))) {
+		if (!tnl_toggle)
+			return -1;
+
+		/* tunnel encap'ed */
+		if (dst && dst_xfrm(dst))
+			/*
+			 * skb not ready to bind since it still needs
+			 * to be encrypted
+			 */
+			return -1;
+
+		/* nothing need to be done further for this skb */
+		return 1;
+	}
+
+	/* no need for tunnel encapsulation or crypto encryption */
+	return 0;
+}
+
+static inline void hnat_fill_offload_engine_entry(struct sk_buff *skb,
+						  struct foe_entry *entry,
+						  const struct net_device *dev)
+{
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	if (!tnl_toggle)
+		return;
+
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
+		/*
+		 * if skb_hnat_tops(skb) is setup for encapsulation,
+		 * we fill in hnat tport and tops_entry for tunnel encapsulation
+		 * offloading
+		 */
+		if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)) {
+			entry->ipv4_hnapt.tport_id = NR_TDMA_EIP197_QDMA_TPORT;
+			entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
+		} else {
+			entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
+		}
+		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
+
+	} else if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)) {
+		entry->ipv4_hnapt.tport_id = NR_EIP197_QDMA_TPORT;
+		entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
+	} else {
+		return;
+	}
+
+	/*
+	 * the outbound flow of offload engines uses QID 12;
+	 * set its rate limit to line rate
+	 */
+	entry->ipv4_hnapt.iblk2.qid = 12;
+	qos_rate_limit_set(12, dev);
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
+}
+
+int hnat_bind_crypto_entry(struct sk_buff *skb, const struct net_device *dev, int fill_inner_info)
+{
+	struct foe_entry *foe;
+	struct foe_entry entry = { 0 };
+	struct ethhdr *eth = eth_hdr(skb);
+	struct iphdr *iph;
+	struct tcpudphdr _ports;
+	const struct tcpudphdr *pptr;
+	u32 gmac = NR_DISCARD;
+	int udp = 0;
+	struct mtk_mac *mac = netdev_priv(dev);
+	struct flow_offload_hw_path hw_path = { .dev = (struct net_device *) dev,
+						.virt_dev = (struct net_device *) dev };
+
+	if (!tnl_toggle) {
+		pr_notice("tnl_toggle is disable now!|\n");
+		return -1;
+	}
+
+	if (!skb_hnat_is_hashed(skb) || skb_hnat_ppe(skb) >= CFG_PPE_NUM)
+		return 0;
+
+	foe = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
+
+	if (entry_hnat_is_bound(foe))
+		return 0;
+
+	if (eth->h_proto != htons(ETH_P_IP))
+		return 0;
+
+	if (skb_hnat_tops(skb) && mtk_tnl_encap_offload)
+		mtk_tnl_encap_offload(skb);
+
+	hnat_get_filled_unbind_entry(skb, &entry);
+
+	if (dev->netdev_ops->ndo_flow_offload_check)
+		dev->netdev_ops->ndo_flow_offload_check(&hw_path);
+
+	/* For packets passing through VTI (route-based IPsec),
+	 * We need to fill the inner packet info into hnat entry.
+	 * Since the skb->mac_header is not pointed to correct position
+	 * in skb_to_hnat_info().
+	 */
+	if (fill_inner_info) {
+		iph = ip_hdr(skb);
+		switch (iph->protocol) {
+		case IPPROTO_UDP:
+			udp = 1;
+			/* fallthrough */
+		case IPPROTO_TCP:
+			entry.ipv4_hnapt.etype = htons(ETH_P_IP);
+			if (IS_IPV4_GRP(&entry)) {
+				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
+				if (hnat_priv->data->per_flow_accounting)
+					entry.ipv4_hnapt.iblk2.mibf = 1;
+
+				entry.ipv4_hnapt.vlan1 = hw_path.vlan_id;
+
+				if (skb_vlan_tagged(skb)) {
+					entry.bfib1.vlan_layer += 1;
+
+					if (entry.ipv4_hnapt.vlan1)
+						entry.ipv4_hnapt.vlan2 =
+							skb->vlan_tci;
+					else
+						entry.ipv4_hnapt.vlan1 =
+							skb->vlan_tci;
+				}
+
+				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
+				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
+				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
+				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
+
+				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
+				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
+
+				if (IS_IPV4_HNAPT(&entry)) {
+					pptr = skb_header_pointer(skb, iph->ihl * 4 + ETH_HLEN,
+								  sizeof(_ports),
+								  &_ports);
+					if (unlikely(!pptr))
+						return -1;
+
+					entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
+					entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
+				}
+			} else
+				return 0;
+
+			entry.ipv4_hnapt.bfib1.udp = udp;
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+				entry.ipv4_hnapt.eg_keep_ecn = 1;
+				entry.ipv4_hnapt.eg_keep_dscp = 1;
+#endif
+			break;
+
+		default:
+			return -1;
+		}
+	}
+
+	entry = ppe_fill_info_blk(eth, entry, &hw_path);
+
+	if (IS_LAN(dev)) {
+		if (IS_BOND(dev))
+			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
+				 NR_GMAC2_PORT : NR_GMAC1_PORT;
+		else
+			gmac = NR_GMAC1_PORT;
+	} else if (IS_LAN2(dev)) {
+		gmac = (mac->id == MTK_GMAC2_ID) ? NR_GMAC2_PORT : NR_GMAC3_PORT;
+	} else if (IS_WAN(dev)) {
+		if (IS_GMAC1_MODE)
+			gmac = NR_GMAC1_PORT;
+		else
+			gmac = (mac->id == MTK_GMAC2_ID) ? NR_GMAC2_PORT : NR_GMAC3_PORT;
+	} else {
+		pr_notice("Unknown case of dp, iif=%x --> %s\n", skb_hnat_iface(skb), dev->name);
+		return -1;
+	}
+
+	entry.ipv4_hnapt.iblk2.mibf = 1;
+	entry.ipv4_hnapt.iblk2.dp = gmac;
+	entry.ipv4_hnapt.iblk2.port_mg =
+		(hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;
+	entry.bfib1.ttl = 1;
+	entry.bfib1.state = BIND;
+
+	hnat_fill_offload_engine_entry(skb, &entry, dev);
+
+	if (!skb_hnat_tops(skb)) {
+		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
+		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
+		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
+	}
+
+	/* wait for entry written done */
+	wmb();
+
+	if (entry_hnat_is_bound(foe))
+		return 0;
+
+	spin_lock(&hnat_priv->entry_lock);
+	memcpy(foe, &entry, sizeof(entry));
+	spin_unlock(&hnat_priv->entry_lock);
+
+	if (hnat_priv->data->per_flow_accounting &&
+	    skb_hnat_entry(skb) < hnat_priv->foe_etry_num &&
+	    skb_hnat_ppe(skb) < CFG_PPE_NUM)
+		memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
+		       0, sizeof(struct mib_entry));
+
+	return 0;
+}
+EXPORT_SYMBOL(hnat_bind_crypto_entry);
 
 static unsigned int skb_to_hnat_info(struct sk_buff *skb,
 				     const struct net_device *dev,
@@ -1264,6 +1600,7 @@
 	u32 qid = 0;
 	u32 payload_len = 0;
 	int mape = 0;
+	int ret;
 	int i = 0;
 
 	if (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPIP)
@@ -1279,6 +1616,14 @@
 	if (!hnat_priv->data->mcast && is_multicast_ether_addr(eth->h_dest))
 		return 0;
 
+	ret = hnat_offload_engine_done(skb, hw_path);
+	if (ret == 1) {
+		hnat_get_filled_unbind_entry(skb, &entry);
+		goto hnat_entry_bind;
+	} else if (ret == -1) {
+		return 0;
+	}
+
 	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
 	entry.bfib1.state = foe->udib1.state;
 
@@ -1713,6 +2058,12 @@
 	/* Fill Layer2 Info.*/
 	entry = ppe_fill_L2_info(eth, entry, hw_path);
 
+	if ((skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL) ||
+	    (!skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb) &&
+	    skb_dst(skb) && dst_xfrm(skb_dst(skb))))
+		goto hnat_entry_skip_bind;
+
+hnat_entry_bind:
 	/* Fill Info Blk*/
 	entry = ppe_fill_info_blk(eth, entry, hw_path);
 
@@ -1918,6 +2269,10 @@
 		}
 	}
 
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	hnat_fill_offload_engine_entry(skb, &entry, dev);
+#endif
+
 	/* The INFO2.port_mg and 2nd VLAN ID fields of PPE entry are redefined
 	 * by Wi-Fi whnat engine. These data and INFO2.dp will be updated and
 	 * the entry is set to BIND state in mtk_sw_nat_hook_tx().
@@ -1955,6 +2310,7 @@
 		return 0;
 	}
 
+hnat_entry_skip_bind:
 	if (spin_trylock(&hnat_priv->entry_lock)) {
 		/* Final check if the entry is not in UNBIND state,
 		 * we should not modify it right now.
@@ -2074,6 +2430,12 @@
 	switch ((int)entry.bfib1.pkt_type) {
 	case IPV4_HNAPT:
 	case IPV4_HNAT:
+		/*
+		 * skip if packet is an encap tnl packet or it may
+		 * screw up inner mac header
+		 */
+		if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
+			break;
 		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
 		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
 		break;
@@ -2265,6 +2627,10 @@
 		entry.ipv6_5t_route.iblk2.dp = gmac_no & 0xf;
 	}
 
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	hnat_fill_offload_engine_entry(skb, &entry, NULL);
+#endif
+
 	entry.bfib1.ttl = 1;
 	entry.bfib1.state = BIND;
 
@@ -2369,6 +2735,9 @@
 
 	skb_hnat_alg(skb) = 0;
 	skb_hnat_filled(skb) = 0;
+	skb_hnat_set_tops(skb, 0);
+	skb_hnat_set_cdrt(skb, 0);
+	skb_hnat_set_is_decrypt(skb, 0);
 	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
 
 	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
@@ -2455,7 +2824,8 @@
 	 * is from local_out which is also filtered in sanity check.
 	 */
 	dst = skb_dst(skb);
-	if (dst && dst_xfrm(dst))
+	if (dst && dst_xfrm(dst) &&
+	    (!mtk_crypto_offloadable || !mtk_crypto_offloadable(skb)))
 		return 0;
 
 	ct = nf_ct_get(skb, &ctinfo);
@@ -2800,6 +3170,7 @@
 	struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
 						.virt_dev = (struct net_device*)out };
 	const struct net_device *arp_dev = out;
+	bool is_virt_dev = false;
 
 	if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
 		return 0;
@@ -2819,10 +3190,37 @@
 
 	if (out->netdev_ops->ndo_flow_offload_check) {
 		out->netdev_ops->ndo_flow_offload_check(&hw_path);
+
 		out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
+		if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL && mtk_tnl_encap_offload) {
+			if (ntohs(skb->protocol) == ETH_P_IP &&
+			    ip_hdr(skb)->protocol == IPPROTO_TCP) {
+				skb_hnat_set_tops(skb, hw_path.tnl_type + 1);
+			} else {
+				/*
+				 * we do not support protocols other than IPv4 TCP
+				 * for tunnel protocol offload yet
+				 */
+				skb_hnat_alg(skb) = 1;
+				return 0;
+			}
+		}
+	}
+
+	/* we do not support protocols other than IPv4 TCP for crypto offload yet */
+	if (skb_hnat_is_decrypt(skb) &&
+	    (ntohs(skb->protocol) != ETH_P_IP ||
+	    ip_hdr(skb)->protocol != IPPROTO_TCP)) {
+		skb_hnat_alg(skb) = 1;
+		return 0;
 	}
 
 	if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
+		is_virt_dev = true;
+
+	if (is_virt_dev
+	    && !(skb_hnat_tops(skb) && skb_hnat_is_encap(skb)
+		 && (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)))
 		return 0;
 
 	if (debug_level >= 7)
@@ -2843,9 +3241,18 @@
 		if (fn && !mtk_hnat_accel_type(skb))
 			break;
 
-		if (fn && fn(skb, arp_dev, &hw_path))
+		if (!is_virt_dev && fn && fn(skb, arp_dev, &hw_path))
 			break;
 
+		/* skb_hnat_tops(skb) is updated in mtk_tnl_offload() */
+		if (skb_hnat_tops(skb)) {
+			if (skb_hnat_is_encap(skb) && !is_virt_dev &&
+			    mtk_tnl_encap_offload && mtk_tnl_encap_offload(skb))
+				break;
+			if (skb_hnat_is_decap(skb))
+				break;
+		}
+
 		skb_to_hnat_info(skb, out, entry, &hw_path);
 		break;
 	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
@@ -3116,6 +3523,9 @@
 	if (iph->protocol == IPPROTO_IPV6) {
 		entry->udib1.pkt_type = IPV6_6RD;
 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+	} else if (is_magic_tag_valid(skb) &&
+		   ((skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)) || skb_hnat_tops(skb))) {
+		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
 	} else {
 		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
 	}
diff --git a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
index 0c4c7ed..4d12010 100644
--- a/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+++ b/21.02/files/target/linux/mediatek/files-5.4/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -44,7 +44,11 @@
 	u32 is_sp : 1;
 	u32 hf : 1;
 	u32 amsdu : 1;
-	u32 resv3 : 19;
+	u32 tops : 6;
+	u32 is_decap : 1;
+	u32 cdrt : 8;
+	u32 is_decrypt : 1;
+	u32 resv3 : 3;
 	u32 magic_tag_protect : 16;
 } __packed;
 #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
@@ -92,6 +96,29 @@
 	((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
 
 #define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+#define skb_hnat_tops(skb) (((struct hnat_desc *)((skb)->head))->tops)
+#define skb_hnat_is_decap(skb) (((struct hnat_desc *)((skb)->head))->is_decap)
+#define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
+#define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
+#define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
+#define skb_hnat_cdrt(skb) (((struct hnat_desc *)((skb)->head))->cdrt)
+#define skb_hnat_is_decrypt(skb) (((struct hnat_desc *)((skb)->head))->is_decrypt)
+#define skb_hnat_is_encrypt(skb) (!skb_hnat_is_decrypt(skb))
+#define skb_hnat_set_cdrt(skb, cdrt) ((skb_hnat_cdrt(skb)) = (cdrt))
+#define skb_hnat_set_is_decrypt(skb, is_dec) ((skb_hnat_is_decrypt(skb)) = is_dec)
+#else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
+#define skb_hnat_tops(skb) (0)
+#define skb_hnat_is_decap(skb) (0)
+#define skb_hnat_is_encap(skb) (0)
+#define skb_hnat_set_tops(skb, tops)
+#define skb_hnat_set_is_decap(skb, is_decap)
+#define skb_hnat_cdrt(skb) (0)
+#define skb_hnat_is_decrypt(skb) (0)
+#define skb_hnat_is_encrypt(skb) (0)
+#define skb_hnat_set_cdrt(skb, cdrt)
+#define skb_hnat_set_is_decrypt(skb, is_dec)
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
 #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
 #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
 #define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
diff --git a/21.02/files/target/linux/mediatek/patches-5.4/999-4102-mtk-crypto-offload-support.patch b/21.02/files/target/linux/mediatek/patches-5.4/999-4102-mtk-crypto-offload-support.patch
deleted file mode 100644
index a084507..0000000
--- a/21.02/files/target/linux/mediatek/patches-5.4/999-4102-mtk-crypto-offload-support.patch
+++ /dev/null
@@ -1,407 +0,0 @@
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1990,6 +1990,7 @@ static void mtk_tx_set_dma_desc_v3(struc
- 	u32 data = 0;
- 	u8 tops_entry  = 0;
- 	u8 tport = 0;
-+	u8 cdrt = 0;
- 
- 	WRITE_ONCE(desc->txd1, info->addr);
- 
-@@ -2014,11 +2015,25 @@ static void mtk_tx_set_dma_desc_v3(struc
- 	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
- 		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
- #endif
--	if (mtk_get_tnl_netsys_params && skb) {
-+	if (mtk_get_tnl_netsys_params && skb && !(skb->inner_protocol == IPPROTO_ESP)) {
- 		u32 params = mtk_get_tnl_netsys_params(skb);
- 
- 		tops_entry = params & 0x000000FF;
- 		tport = (params & 0x0000FF00) >> 8;
-+		cdrt = (params & 0x00FF0000) >> 16;
-+	}
-+	/* forward to eip197 if this packet is going to encrypt */
-+#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
-+	else if (unlikely(skb->inner_protocol == IPPROTO_ESP && skb_hnat_cdrt(skb) && is_magic_tag_valid(skb))) {
-+		/* carry cdrt index for encryption */
-+		cdrt = skb_hnat_cdrt(skb);
-+		skb_hnat_magic_tag(skb) = 0;
-+#else
-+	else if (unlikely(skb->inner_protocol == IPPROTO_ESP && skb_tnl_cdrt(skb) && is_tnl_tag_valid(skb))) {
-+		cdrt = skb_tnl_cdrt(skb);
-+		skb_tnl_magic_tag(skb) = 0;
-+#endif
-+		tport = EIP197_QDMA_TPORT;
- 	}
- 
- 	if (tport) {
-@@ -2055,6 +2070,11 @@ static void mtk_tx_set_dma_desc_v3(struc
- 		data |= (tops_entry & TX_DMA_TOPS_ENTRY_MASK) << TX_DMA_TOPS_ENTRY_SHIFT;
- 	}
- 
-+	if (cdrt) {
-+		data &= ~(TX_DMA_CDRT_MASK << TX_DMA_CDRT_SHIFT);
-+		data |= (cdrt & TX_DMA_CDRT_MASK) << TX_DMA_CDRT_SHIFT;
-+	}
-+
- 	WRITE_ONCE(desc->txd8, data);
- }
- 
-@@ -2568,6 +2588,7 @@ static int mtk_poll_rx(struct napi_struc
- 
- 		skb_hnat_alg(skb) = 0;
- 		skb_hnat_filled(skb) = 0;
-+		skb_hnat_set_cdrt(skb, 0);
- 		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
- 		skb_hnat_set_tops(skb, 0);
- 		skb_hnat_set_is_decap(skb, 0);
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-@@ -1091,6 +1091,9 @@ static unsigned int hnat_ipv4_get_nextho
- 		return 0;
- 	}
- 
-+	if (!skb_hnat_cdrt(skb) && dst && dst_xfrm(dst))
-+		return 0;
-+
- 	rcu_read_lock_bh();
- 	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
- 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
-@@ -1298,6 +1301,9 @@ static inline void hnat_fill_offload_eng
- 		 */
- 		entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
- 		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
-+	} else if (skb_hnat_cdrt(skb)) {
-+		entry->ipv4_hnapt.tport_id = NR_EIP197_QDMA_TPORT;
-+		entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
- 	} else {
- 		return;
- 	}
-@@ -1307,6 +1313,158 @@ static inline void hnat_fill_offload_eng
- #endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
- }
- 
-+int hnat_bind_crypto_entry(struct sk_buff *skb, const struct net_device *dev, int fill_inner_info) {
-+	struct foe_entry *foe;
-+	struct foe_entry entry = { 0 };
-+	struct ethhdr *eth = eth_hdr(skb);
-+	struct iphdr *iph;
-+	struct tcpudphdr _ports;
-+	const struct tcpudphdr *pptr;
-+	u32 gmac = NR_DISCARD;
-+	int udp = 0;
-+	struct mtk_mac *mac = netdev_priv(dev);
-+	struct flow_offload_hw_path hw_path = { .dev = (struct net_device *) dev,
-+						.virt_dev = (struct net_device *) dev };
-+
-+	if (!skb_hnat_is_hashed(skb) || skb_hnat_ppe(skb) >= CFG_PPE_NUM)
-+		return 0;
-+
-+	foe = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
-+
-+	if (entry_hnat_is_bound(foe))
-+		return 0;
-+
-+	if (eth->h_proto != htons(ETH_P_IP))
-+		return 0;
-+
-+	if (skb_hnat_tops(skb) && mtk_tnl_encap_offload)
-+		mtk_tnl_encap_offload(skb);
-+
-+	hnat_get_filled_unbind_entry(skb, &entry);
-+
-+	if (dev->netdev_ops->ndo_flow_offload_check)
-+		dev->netdev_ops->ndo_flow_offload_check(&hw_path);
-+
-+	/* For packets pass through VTI (route-based IPSec),
-+	 * We need to fill the inner packet info into hnat entry.
-+	 * Since the skb->mac_header is not pointed to correct position
-+	 * in skb_to_hnat_info().
-+	 */
-+	if (fill_inner_info) {
-+		iph = ip_hdr(skb);
-+		switch (iph->protocol) {
-+		case IPPROTO_UDP:
-+			udp = 1;
-+			/* fallthrough */
-+		case IPPROTO_TCP:
-+			entry.ipv4_hnapt.etype = htons(ETH_P_IP);
-+			if (IS_IPV4_GRP(&entry)) {
-+				entry.ipv4_hnapt.iblk2.dscp = iph->tos;
-+				if (hnat_priv->data->per_flow_accounting)
-+					entry.ipv4_hnapt.iblk2.mibf = 1;
-+
-+				entry.ipv4_hnapt.vlan1 = hw_path.vlan_id;
-+
-+				if (skb_vlan_tagged(skb)) {
-+					entry.bfib1.vlan_layer += 1;
-+
-+					if (entry.ipv4_hnapt.vlan1)
-+						entry.ipv4_hnapt.vlan2 =
-+							skb->vlan_tci;
-+					else
-+						entry.ipv4_hnapt.vlan1 =
-+							skb->vlan_tci;
-+				}
-+
-+				entry.ipv4_hnapt.sip = foe->ipv4_hnapt.sip;
-+				entry.ipv4_hnapt.dip = foe->ipv4_hnapt.dip;
-+				entry.ipv4_hnapt.sport = foe->ipv4_hnapt.sport;
-+				entry.ipv4_hnapt.dport = foe->ipv4_hnapt.dport;
-+
-+				entry.ipv4_hnapt.new_sip = ntohl(iph->saddr);
-+				entry.ipv4_hnapt.new_dip = ntohl(iph->daddr);
-+
-+				if (IS_IPV4_HNAPT(&entry)) {
-+					pptr = skb_header_pointer(skb, iph->ihl * 4 + ETH_HLEN,
-+								sizeof(_ports),
-+								&_ports);
-+					if (unlikely(!pptr))
-+						return -1;
-+
-+					entry.ipv4_hnapt.new_sport = ntohs(pptr->src);
-+					entry.ipv4_hnapt.new_dport = ntohs(pptr->dst);
-+				}
-+			} else
-+				return 0;
-+
-+			entry.ipv4_hnapt.bfib1.udp = udp;
-+
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+				entry.ipv4_hnapt.eg_keep_ecn = 1;
-+				entry.ipv4_hnapt.eg_keep_dscp = 1;
-+#endif
-+			break;
-+
-+		default:
-+			return -1;
-+		}
-+	}
-+
-+	entry = ppe_fill_info_blk(eth, entry, &hw_path);
-+
-+	if (IS_LAN(dev)) {
-+		if (IS_BOND(dev))
-+			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
-+				 NR_GMAC2_PORT : NR_GMAC1_PORT;
-+		else
-+			gmac = NR_GMAC1_PORT;
-+	} else if (IS_LAN2(dev)) {
-+		gmac = (mac->id == MTK_GMAC2_ID) ? NR_GMAC2_PORT : NR_GMAC3_PORT;
-+	} else if (IS_WAN(dev)) {
-+		if (IS_GMAC1_MODE)
-+			gmac = NR_GMAC1_PORT;
-+		else
-+			gmac = (mac->id == MTK_GMAC2_ID) ? NR_GMAC2_PORT : NR_GMAC3_PORT;
-+	} else {
-+		pr_notice("Unknown case of dp, iif=%x --> %s\n", skb_hnat_iface(skb), dev->name);
-+		return -1;
-+	}
-+
-+	entry.ipv4_hnapt.iblk2.mibf = 1;
-+	entry.ipv4_hnapt.iblk2.dp = gmac;
-+	entry.ipv4_hnapt.iblk2.port_mg =
-+		(hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;
-+	entry.bfib1.ttl = 1;
-+	entry.bfib1.state = BIND;
-+
-+	hnat_fill_offload_engine_entry(skb, &entry, dev);
-+
-+	if (!skb_hnat_tops(skb)) {
-+		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
-+		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
-+		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
-+		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
-+	}
-+
-+	wmb();
-+
-+	if (entry_hnat_is_bound(foe))
-+		return 0;
-+
-+	spin_lock(&hnat_priv->entry_lock);
-+	memcpy(foe, &entry, sizeof(entry));
-+	spin_unlock(&hnat_priv->entry_lock);
-+
-+	if (hnat_priv->data->per_flow_accounting &&
-+	    skb_hnat_entry(skb) < hnat_priv->foe_etry_num &&
-+	    skb_hnat_ppe(skb) < CFG_PPE_NUM)
-+		memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
-+		       0, sizeof(struct mib_entry));
-+
-+	return 0;
-+}
-+EXPORT_SYMBOL(hnat_bind_crypto_entry);
-+
- static unsigned int skb_to_hnat_info(struct sk_buff *skb,
- 				     const struct net_device *dev,
- 				     struct foe_entry *foe,
-@@ -2438,6 +2596,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
- 	skb_hnat_alg(skb) = 0;
- 	skb_hnat_filled(skb) = 0;
- 	skb_hnat_set_tops(skb, 0);
-+	skb_hnat_set_cdrt(skb, 0);
- 	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
- 
- 	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
-@@ -2524,7 +2683,8 @@ static unsigned int mtk_hnat_accel_type(
- 	 * is from local_out which is also filtered in sanity check.
- 	 */
- 	dst = skb_dst(skb);
--	if (dst && dst_xfrm(dst))
-+	if (dst && dst_xfrm(dst)
-+	    && (!mtk_crypto_offloadable || !mtk_crypto_offloadable(skb)))
- 		return 0;
- 
- 	ct = nf_ct_get(skb, &ctinfo);
-@@ -2906,6 +3066,14 @@ static unsigned int mtk_hnat_nf_post_rou
- 		}
- 	}
- 
-+	/* we are not support protocols other than IPv4 TCP for crypto offload yet */
-+	if (skb_hnat_is_decrypt(skb)
-+	    && (ntohs(skb->protocol) != ETH_P_IP
-+		|| ip_hdr(skb)->protocol != IPPROTO_TCP)) {
-+		skb_hnat_alg(skb) = 1;
-+		return 0;
-+	}
-+
- 	if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
- 		is_virt_dev = true;
- 
-@@ -3213,7 +3381,10 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
- 	if (iph->protocol == IPPROTO_IPV6) {
- 		entry->udib1.pkt_type = IPV6_6RD;
- 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
--	} else if (!skb_hnat_tops(skb)) {
-+	} else if (is_magic_tag_valid(skb)
-+		   && (skb_hnat_cdrt(skb) || skb_hnat_tops(skb))) {
-+		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
-+	} else {
- 		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
- 	}
- 
---- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
-@@ -46,7 +46,8 @@ struct hnat_desc {
- 	u32 amsdu : 1;
- 	u32 tops : 6;
- 	u32 is_decap : 1;
--	u32 resv3 : 12;
-+	u32 cdrt : 8;
-+	u32 resv3 : 4;
- 	u32 magic_tag_protect : 16;
- } __packed;
- #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
-@@ -100,12 +101,16 @@ struct hnat_desc {
- #define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
- #define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
- #define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
-+#define skb_hnat_cdrt(skb) (((struct hnat_desc *)((skb)->head))->cdrt)
-+#define skb_hnat_set_cdrt(skb, cdrt) ((skb_hnat_cdrt(skb)) = (cdrt))
- #else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
- #define skb_hnat_tops(skb) (0)
- #define skb_hnat_is_decap(skb) (0)
- #define skb_hnat_is_encap(skb) (0)
- #define skb_hnat_set_tops(skb, tops)
- #define skb_hnat_set_is_decap(skb, is_decap)
-+#define skb_hnat_cdrt(skb) (0)
-+#define skb_hnat_set_cdrt(skb, cdrt)
- #endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
- #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
- #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
-@@ -57,6 +57,8 @@ int (*mtk_tnl_decap_offload)(struct sk_b
- EXPORT_SYMBOL(mtk_tnl_decap_offload);
- bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
- EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
-+bool (*mtk_crypto_offloadable)(struct sk_buff *skb) = NULL;
-+EXPORT_SYMBOL(mtk_crypto_offloadable);
- 
- int (*hnat_set_wdma_pse_port_state)(u32 wdma_idx, bool up) = NULL;
- EXPORT_SYMBOL(hnat_set_wdma_pse_port_state);
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
-@@ -1153,6 +1153,8 @@ enum FoeIpAct {
- #define NR_WDMA2_PORT 13
- #define NR_GMAC3_PORT 15
- #define NR_QDMA_TPORT 1
-+#define NR_EIP197_TPORT 2
-+#define NR_EIP197_QDMA_TPORT 3
- #define NR_TDMA_TPORT 4
- #define NR_TDMA_QDMA_TPORT 5
- #define LAN_DEV_NAME hnat_priv->lan
-@@ -1334,6 +1336,9 @@ extern int qos_toggle;
- extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
- extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
- extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
-+extern bool (*mtk_crypto_offloadable)(struct sk_buff *skb);
-+extern int hnat_bind_crypto_entry(struct sk_buff *skb, const struct net_device *dev,
-+					int fill_inner_info);
- 
- int ext_if_add(struct extdev_entry *ext_entry);
- int ext_if_del(struct extdev_entry *ext_entry);
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -576,6 +576,8 @@
- #define MTK_QDMA_GMAC3_QID	6
- 
- /* QDMA V2 descriptor txd8 */
-+#define TX_DMA_CDRT_SHIFT          0
-+#define TX_DMA_CDRT_MASK           0xff
- #define TX_DMA_TOPS_ENTRY_SHIFT    8
- #define TX_DMA_TOPS_ENTRY_MASK     0x3f
- 
-@@ -588,6 +590,7 @@
- #define TX_DMA_SPTAG_V3            BIT(27)
- 
- /* QDMA V2 descriptor txd4 */
-+#define EIP197_QDMA_TPORT          3
- #define TX_DMA_TPORT_SHIFT         0
- #define TX_DMA_TPORT_MASK          0xf
- #define TX_DMA_FPORT_SHIFT_V2      8
-@@ -1075,6 +1078,43 @@
- #define MT7628_SDM_MAC_ADRL	(MT7628_SDM_OFFSET + 0x0c)
- #define MT7628_SDM_MAC_ADRH	(MT7628_SDM_OFFSET + 0x10)
- 
-+#if !defined(CONFIG_NET_MEDIATEK_HNAT) && !defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+struct tnl_desc {
-+	u32 entry : 15;
-+	u32 filled : 3;
-+	u32 crsn : 5;
-+	u32 resv1 : 3;
-+	u32 sport : 4;
-+	u32 resv2 : 1;
-+	u32 alg : 1;
-+	u32 iface : 8;
-+	u32 wdmaid : 2;
-+	u32 rxid : 2;
-+	u32 wcid : 16;
-+	u32 bssid : 8;
-+	u32 usr_info : 16;
-+	u32 tid : 4;
-+	u32 is_fixedrate : 1;
-+	u32 is_prior : 1;
-+	u32 is_sp : 1;
-+	u32 hf : 1;
-+	u32 amsdu : 1;
-+	u32 tops : 6;
-+	u32 is_decap : 1;
-+	u32 cdrt : 8;
-+	u32 resv3 : 4;
-+	u32 magic_tag_protect : 16;
-+} __packed;
-+
-+#define TNL_MAGIC_TAG 0x6789
-+#define skb_tnl_cdrt(skb) (((struct tnl_desc *)((skb)->head))->cdrt)
-+#define skb_tnl_set_cdrt(skb, cdrt) ((skb_tnl_cdrt(skb)) = (cdrt))
-+#define skb_tnl_magic_tag(skb) (((struct tnl_desc *)((skb)->head))->magic_tag_protect)
-+#define is_tnl_tag_valid(skb) (skb_tnl_magic_tag(skb) == TNL_MAGIC_TAG)
-+#endif // NetsysV3
-+#endif // hnat
-+
- struct mtk_rx_dma {
- 	unsigned int rxd1;
- 	unsigned int rxd2;