[][openwrt][mt7988][tops][tnl-offload: support with crypto-eip]

[Description]
Add tunnel offload with crypto-eip support.

Tunnel parameter now records CDRT index to provide help for tunnel
decapsulation and encapsulation offload setup flow.

For decapsulation, if the skb already carries CDRT information, it means
that the packet has already been decrypted by the HW offload engine
crypto-eip and a CLS rule has already been set up for that packet.
Therefore, we should not modify the existing CLS rule. Instead, we just
need to update the TPORT of that CLS entry.

For encapsulation, nothing more needs to be done.

[Release-log]
N/A

Change-Id: Ib4ff408836fa0ed16895fc486c5ebdee90cfa0b6
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/7923905
diff --git a/package-21.02/kernel/tops/src/inc/tunnel.h b/package-21.02/kernel/tops/src/inc/tunnel.h
index 387d923..d00eebe 100644
--- a/package-21.02/kernel/tops/src/inc/tunnel.h
+++ b/package-21.02/kernel/tops/src/inc/tunnel.h
@@ -127,6 +127,7 @@
 	u16 protocol;
 	u8 tops_entry_proto;
 	u8 cls_entry;
+	u8 cdrt;
 	u8 flag; /* bit: enum tops_tnl_params_flag */
 	union {
 		struct l2tp_param l2tp; /* 4B */
@@ -147,6 +148,13 @@
 	u32 flag; /* bit: enum tops_tnl_info_flag */
 } __aligned(16);
 
+/*
+ * tnl_l2_param_update:
+ *	update tunnel l2 info only
+ *	return 1 on l2 params have difference
+ *	return 0 on l2 params are the same
+ *	return negative value on error
+ */
 struct tops_tnl_type {
 	const char *type_name;
 	enum tops_entry_type tops_entry;
@@ -162,6 +170,8 @@
 				     struct tops_tnl_params *tnl_params);
 	int (*tnl_debug_param_setup)(const char *buf, int *ofs,
 				     struct tops_tnl_params *tnl_params);
+	int (*tnl_l2_param_update)(struct sk_buff *skb,
+				   struct tops_tnl_params *tnl_params);
 	int (*tnl_dump_param)(char *buf, struct tops_tnl_params *tnl_params);
 	bool (*tnl_info_match)(struct tops_tnl_params *params1,
 			       struct tops_tnl_params *params2);
diff --git a/package-21.02/kernel/tops/src/tnl_offload.c b/package-21.02/kernel/tops/src/tnl_offload.c
index c3b2cdf..bd37faa 100644
--- a/package-21.02/kernel/tops/src/tnl_offload.c
+++ b/package-21.02/kernel/tops/src/tnl_offload.c
@@ -22,6 +22,8 @@
 #include <mtk_hnat/hnat.h>
 #include <mtk_hnat/nf_hnat_mtk.h>
 
+#include <pce/cdrt.h>
+#include <pce/cls.h>
 #include <pce/dipfilter.h>
 #include <pce/pce.h>
 
@@ -123,7 +125,7 @@
 
 static inline bool skb_tops_valid(struct sk_buff *skb)
 {
-	return (skb && skb_hnat_tops(skb) <= __TOPS_ENTRY_MAX);
+	return (skb && skb_hnat_tops(skb) < __TOPS_ENTRY_MAX);
 }
 
 static inline struct tops_tnl_type *skb_to_tnl_type(struct sk_buff *skb)
@@ -139,6 +141,19 @@
 	return tnl_type ? tnl_type : ERR_PTR(-ENODEV);
 }
 
+static inline struct tops_tnl_info *skb_to_tnl_info(struct sk_buff *skb)
+{
+	u32 tnl_idx = skb_hnat_tops(skb) - __TOPS_ENTRY_MAX;
+
+	if (tnl_idx >= CONFIG_TOPS_TNL_NUM)
+		return ERR_PTR(-EINVAL);
+
+	if (!test_bit(tnl_idx, tops_tnl.tnl_used))
+		return ERR_PTR(-EACCES);
+
+	return &tops_tnl.tnl_infos[tnl_idx];
+}
+
 static inline void skb_mark_unbind(struct sk_buff *skb)
 {
 	skb_hnat_tops(skb) = 0;
@@ -331,25 +346,30 @@
 	unsigned long flag;
 
 	tnl_info->tnl_params.cls_entry = tnl_info->tcls->cls->idx;
-	TOPS_NOTICE("cls entry: %u\n", tnl_info->tcls->cls->idx);
 
 	spin_lock_irqsave(&tnl_info->lock, flag);
 	tnl_info->cache.cls_entry = tnl_info->tcls->cls->idx;
 	spin_unlock_irqrestore(&tnl_info->lock, flag);
 }
 
-static void mtk_tops_tnl_info_cls_entry_unprepare(struct tops_tnl_info *tnl_info)
+static void mtk_tops_tnl_info_cls_entry_unprepare(struct tops_tnl_info *tnl_info,
+						  struct tops_tnl_params *tnl_params)
 {
 	struct tops_cls_entry *tcls = tnl_info->tcls;
 
-	pr_notice("cls entry unprepare\n");
 	tnl_info->tcls = NULL;
 
 	if (refcount_dec_and_test(&tcls->refcnt)) {
-		pr_notice("cls entry delete\n");
 		list_del(&tcls->node);
 
-		memset(&tcls->cls->cdesc, 0, sizeof(tcls->cls->cdesc));
+		if (!tnl_params->cdrt)
+			memset(&tcls->cls->cdesc, 0, sizeof(tcls->cls->cdesc));
+		else
+			/*
+			 * recover tport_ix to let match packets to
+			 * go through EIP197 only
+			 */
+			CLS_DESC_DATA(&tcls->cls->cdesc, tport_idx, 2);
 
 		mtk_pce_cls_entry_write(tcls->cls);
 
@@ -360,7 +380,8 @@
 }
 
 static struct tops_cls_entry *
-mtk_tops_tnl_info_cls_entry_prepare(struct tops_tnl_info *tnl_info)
+mtk_tops_tnl_info_cls_entry_prepare(struct tops_tnl_info *tnl_info,
+				    struct tops_tnl_params *tnl_params)
 {
 	struct tops_cls_entry *tcls;
 	int ret;
@@ -369,10 +390,21 @@
 	if (!tcls)
 		return ERR_PTR(-ENOMEM);
 
+	if (!tnl_params->cdrt) {
+		tcls->cls = mtk_pce_cls_entry_alloc();
+		if (IS_ERR(tcls->cls)) {
+			ret = PTR_ERR(tcls->cls);
+			goto free_tcls;
+		}
+	} else {
+		struct cdrt_entry *cdrt = mtk_pce_cdrt_entry_find(tnl_params->cdrt);
+
-	tcls->cls = mtk_pce_cls_entry_alloc();
-	if (IS_ERR(tcls->cls)) {
-		ret = PTR_ERR(tcls->cls);
-		goto free_tcls;
+		if (IS_ERR(cdrt)) {
+			ret = PTR_ERR(cdrt);
+			goto free_tcls;
+		}
+
+		tcls->cls = cdrt->cls;
 	}
 
 	INIT_LIST_HEAD(&tcls->node);
@@ -397,10 +429,8 @@
 		return -EINVAL;
 
 	ret = mtk_pce_cls_entry_write(tnl_info->tcls->cls);
-	if (ret) {
-		mtk_tops_tnl_info_cls_entry_unprepare(tnl_info);
+	if (ret)
 		return ret;
-	}
 
 	tnl_info->tcls->updated = true;
 
@@ -409,9 +439,10 @@
 	return 0;
 }
 
-static int mtk_tops_tnl_info_cls_tear_down(struct tops_tnl_info *tnl_info)
+static int mtk_tops_tnl_info_cls_tear_down(struct tops_tnl_info *tnl_info,
+					   struct tops_tnl_params *tnl_params)
 {
-	mtk_tops_tnl_info_cls_entry_unprepare(tnl_info);
+	mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);
 
 	return 0;
 }
@@ -452,6 +483,7 @@
 }
 
 static int mtk_tops_tnl_info_cls_single_setup(struct tops_tnl_info *tnl_info,
+					      struct tops_tnl_params *tnl_params,
 					      struct tops_tnl_type *tnl_type)
 {
 	struct tops_cls_entry *tcls;
@@ -461,20 +493,34 @@
 		return 0;
 
 	if (tnl_info->tcls)
-		return mtk_tops_tnl_info_cls_entry_write(tnl_info);
+		goto cls_entry_write;
 
-	tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info);
+	tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info, tnl_params);
 	if (IS_ERR(tcls))
 		return PTR_ERR(tcls);
 
-	ret = tnl_type->cls_entry_setup(tnl_info, &tcls->cls->cdesc);
-	if (ret) {
-		TOPS_ERR("tops cls entry setup failed: %d\n", ret);
-		mtk_tops_tnl_info_cls_entry_unprepare(tnl_info);
-		return ret;
+	if (!tnl_params->cdrt) {
+		ret = tnl_type->cls_entry_setup(tnl_info, &tcls->cls->cdesc);
+		if (ret) {
+			TOPS_ERR("tops cls entry setup failed: %d\n", ret);
+			goto cls_entry_unprepare;
+		}
+	} else {
+		/*
+		 * since CLS is already filled up with outer protocol rule
+		 * we only update CLS tport here to let matched packet stop by TOPS
+		 */
+		CLS_DESC_DATA(&tcls->cls->cdesc, tport_idx, 0x7);
 	}
 
+cls_entry_write:
+	ret = mtk_tops_tnl_info_cls_entry_write(tnl_info);
+
+cls_entry_unprepare:
+	if (ret)
+		mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);
+
-	return mtk_tops_tnl_info_cls_entry_write(tnl_info);
+	return ret;
 }
 
 static struct tops_cls_entry *
@@ -490,7 +536,7 @@
 	return NULL;
 }
 
-static bool mtk_tops_tnl_infO_cls_multi_is_updated(struct tops_tnl_info *tnl_info,
+static bool mtk_tops_tnl_info_cls_multi_is_updated(struct tops_tnl_info *tnl_info,
 						   struct tops_tnl_type *tnl_type,
 						   struct cls_desc *cdesc)
 {
@@ -520,19 +566,35 @@
 }
 
 static int mtk_tops_tnl_info_cls_multi_setup(struct tops_tnl_info *tnl_info,
+					     struct tops_tnl_params *tnl_params,
 					     struct tops_tnl_type *tnl_type)
 {
 	struct tops_cls_entry *tcls;
 	struct cls_desc cdesc;
+
 	int ret;
 
-	memset(&cdesc, 0, sizeof(struct cls_desc));
+	if (!tnl_params->cdrt) {
+		memset(&cdesc, 0, sizeof(struct cls_desc));
 
-	/* prepare cls_desc from tnl_type */
-	ret = tnl_type->cls_entry_setup(tnl_info, &cdesc);
-	if (ret) {
-		TOPS_ERR("tops cls entry setup failed: %d\n", ret);
-		return ret;
+		/* prepare cls_desc from tnl_type */
+		ret = tnl_type->cls_entry_setup(tnl_info, &cdesc);
+		if (ret) {
+			TOPS_ERR("tops cls entry setup failed: %d\n", ret);
+			return ret;
+		}
+	} else {
+		struct cdrt_entry *cdrt = mtk_pce_cdrt_entry_find(tnl_params->cdrt);
+
+		if (IS_ERR(cdrt)) {
+			TOPS_ERR("no cdrt idx: %u related CDRT found\n",
+				 tnl_params->cdrt);
+			return PTR_ERR(cdrt);
+		}
+
+		memcpy(&cdesc, &cdrt->cls->cdesc, sizeof(struct cls_desc));
+
+		CLS_DESC_DATA(&cdesc, tport_idx, 0x7);
 	}
 
 	/*
@@ -544,7 +606,7 @@
 	 * a tcls is not yet updated or
 	 * tnl_info is not yet associated to a tcls
 	 */
-	if (mtk_tops_tnl_infO_cls_multi_is_updated(tnl_info, tnl_type, &cdesc))
+	if (mtk_tops_tnl_info_cls_multi_is_updated(tnl_info, tnl_type, &cdesc))
 		return 0;
 
 	/* tcls is not yet updated, update this tcls */
@@ -552,16 +614,21 @@
 		return mtk_tops_tnl_info_cls_entry_write(tnl_info);
 
 	/* create a new tcls entry and associate with tnl_info */
-	tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info);
+	tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info, tnl_params);
 	if (IS_ERR(tcls))
 		return PTR_ERR(tcls);
 
 	memcpy(&tcls->cls->cdesc, &cdesc, sizeof(struct cls_desc));
 
-	return mtk_tops_tnl_info_cls_entry_write(tnl_info);
+	ret = mtk_tops_tnl_info_cls_entry_write(tnl_info);
+	if (ret)
+		mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);
+
+	return ret;
 }
 
-static int mtk_tops_tnl_info_cls_setup(struct tops_tnl_info *tnl_info)
+static int mtk_tops_tnl_info_cls_setup(struct tops_tnl_info *tnl_info,
+				       struct tops_tnl_params *tnl_params)
 {
 	struct tops_tnl_type *tnl_type;
 
@@ -573,9 +640,11 @@
 		return -EINVAL;
 
 	if (!tnl_type->use_multi_cls)
-		return mtk_tops_tnl_info_cls_single_setup(tnl_info, tnl_type);
+		return mtk_tops_tnl_info_cls_single_setup(tnl_info,
+							  tnl_params,
+							  tnl_type);
 
-	return mtk_tops_tnl_info_cls_multi_setup(tnl_info, tnl_type);
+	return mtk_tops_tnl_info_cls_multi_setup(tnl_info, tnl_params, tnl_type);
 }
 
 static int mtk_tops_tnl_info_dipfilter_tear_down(struct tops_tnl_info *tnl_info)
@@ -712,8 +781,11 @@
 
 	lockdep_assert_held(&tnl_info->lock);
 
+	/* manually preserve essential data among encapsulation and decapsulation */
 	tnl_params->flag |= tnl_info->cache.flag;
 	tnl_params->cls_entry = tnl_info->cache.cls_entry;
+	if (tnl_info->cache.cdrt)
+		tnl_params->cdrt = tnl_info->cache.cdrt;
 
 	if (memcmp(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params))) {
 		memcpy(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params));
@@ -876,6 +948,34 @@
 	return ret;
 }
 
+static int mtk_tops_tnl_l2_update(struct sk_buff *skb)
+{
+	struct tops_tnl_info *tnl_info = skb_to_tnl_info(skb);
+	struct tops_tnl_type *tnl_type;
+	unsigned long flag;
+	int ret;
+
+	if (IS_ERR(tnl_info))
+		return PTR_ERR(tnl_info);
+
+	tnl_type = tnl_info->tnl_type;
+	if (!tnl_type->tnl_l2_param_update)
+		return -ENODEV;
+
+	spin_lock_irqsave(&tnl_info->lock, flag);
+
+	ret = tnl_type->tnl_l2_param_update(skb, &tnl_info->cache);
+	/* tnl params need to be updated */
+	if (ret == 1) {
+		mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
+		ret = 0;
+	}
+
+	spin_unlock_irqrestore(&tnl_info->lock, flag);
+
+	return ret;
+}
+
 static bool mtk_tops_tnl_decap_offloadable(struct sk_buff *skb)
 {
 	struct tops_tnl_type *tnl_type;
@@ -962,6 +1062,7 @@
 	}
 
 	tnl_params.tops_entry_proto = tnl_type->tops_entry;
+	tnl_params.cdrt = skb_hnat_cdrt(skb);
 
 	ret = mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);
 
@@ -976,20 +1077,12 @@
 	return ret;
 }
 
-static int mtk_tops_tnl_encap_offload(struct sk_buff *skb)
+static int __mtk_tops_tnl_encap_offload(struct sk_buff *skb)
 {
 	struct tops_tnl_params tnl_params;
 	struct tops_tnl_type *tnl_type;
 	int ret;
 
-	if (unlikely(!mtk_tops_mcu_alive())) {
-		skb_mark_unbind(skb);
-		return -EAGAIN;
-	}
-
-	if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_encap(skb)))
-		return -EPERM;
-
 	tnl_type = skb_to_tnl_type(skb);
 	if (IS_ERR(tnl_type))
 		return PTR_ERR(tnl_type);
@@ -1003,10 +1096,27 @@
 	if (unlikely(ret))
 		return ret;
 	tnl_params.tops_entry_proto = tnl_type->tops_entry;
+	tnl_params.cdrt = skb_hnat_cdrt(skb);
 
 	return mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);
 }
 
+static int mtk_tops_tnl_encap_offload(struct sk_buff *skb)
+{
+	if (unlikely(!mtk_tops_mcu_alive())) {
+		skb_mark_unbind(skb);
+		return -EAGAIN;
+	}
+
+	if (!skb_hnat_is_encap(skb))
+		return -EPERM;
+
+	if (unlikely(skb_hnat_cdrt(skb)))
+		return mtk_tops_tnl_l2_update(skb);
+
+	return __mtk_tops_tnl_encap_offload(skb);
+}
+
 static struct net_device *mtk_tops_get_tnl_dev(int tnl_idx)
 {
 	if (tnl_idx < TOPS_CRSN_TNL_ID_START || tnl_idx > TOPS_CRSN_TNL_ID_END)
@@ -1136,6 +1246,7 @@
 
 static int mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
 {
+	struct tops_tnl_params tnl_params;
 	int ret;
 
 	ret = mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
@@ -1145,13 +1256,14 @@
 		return ret;
 	}
 
+	memcpy(&tnl_params, &tnl_info->tnl_params, sizeof(struct tops_tnl_params));
 	ret = __mtk_tops_tnl_sync_param_delete(tnl_info);
 	if (ret) {
 		TOPS_ERR("tnl sync deletion failed: %d\n", ret);
 		return ret;
 	}
 
-	ret = mtk_tops_tnl_info_cls_tear_down(tnl_info);
+	ret = mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_params);
 	if (ret) {
 		TOPS_ERR("tnl sync cls tear down faild: %d\n",
 			 ret);
@@ -1200,7 +1312,7 @@
 	int ret;
 
 	if (setup_pce) {
-		ret = mtk_tops_tnl_info_cls_setup(tnl_info);
+		ret = mtk_tops_tnl_info_cls_setup(tnl_info, &tnl_info->tnl_params);
 		if (ret) {
 			TOPS_ERR("tnl cls setup failed: %d\n", ret);
 			return ret;
@@ -1227,7 +1339,7 @@
 	return ret;
 
 cls_tear_down:
-	mtk_tops_tnl_info_cls_tear_down(tnl_info);
+	mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_info->tnl_params);
 
 	return ret;
 }
@@ -1532,7 +1644,7 @@
 
 		mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
 
-		mtk_tops_tnl_info_cls_tear_down(tnl_info);
+		mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_info->tnl_params);
 	}
 
 	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
diff --git a/target/linux/mediatek/patches-5.4/999-4100-mtk-tops-tunnel-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch
similarity index 91%
rename from target/linux/mediatek/patches-5.4/999-4100-mtk-tops-tunnel-offload-support.patch
rename to target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch
index 3c17392..0522574 100644
--- a/target/linux/mediatek/patches-5.4/999-4100-mtk-tops-tunnel-offload-support.patch
+++ b/target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch
@@ -1,6 +1,6 @@
 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
 +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -246,6 +246,9 @@ static const char * const mtk_clks_sourc
+@@ -245,6 +245,9 @@ static const char * const mtk_clks_sourc
  	"top_netsys_warp_sel",
  };
  
@@ -10,7 +10,7 @@
  void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
  {
  	__raw_writel(val, eth->base + reg);
-@@ -2172,6 +2175,7 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2171,6 +2174,7 @@ static int mtk_poll_rx(struct napi_struc
  	u64 addr64 = 0;
  	u8 *data, *new_data;
  	struct mtk_rx_dma_v2 *rxd, trxd;
@@ -18,7 +18,7 @@
  	int done = 0;
  
  	if (unlikely(!ring))
-@@ -2215,11 +2219,20 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2214,11 +2218,20 @@ static int mtk_poll_rx(struct napi_struc
  				      0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
  		}
  
@@ -43,7 +43,7 @@
  
  		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
  			goto release_desc;
-@@ -2304,6 +2317,8 @@ static int mtk_poll_rx(struct napi_struc
+@@ -2303,6 +2316,8 @@ static int mtk_poll_rx(struct napi_struc
  		skb_hnat_alg(skb) = 0;
  		skb_hnat_filled(skb) = 0;
  		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
@@ -192,7 +192,7 @@
  	rcu_read_unlock_bh();
  
  	return 0;
-@@ -1202,6 +1233,72 @@ static struct ethhdr *get_ipv6_ipip_ethh
+@@ -1202,6 +1233,81 @@ static struct ethhdr *get_ipv6_ipip_ethh
  	return eth;
  }
  
@@ -205,6 +205,15 @@
 +	memcpy(entry,
 +	       &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
 +	       sizeof(*entry));
++
++#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
++	entry->bfib1.mc = 0;
++#endif /* defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3) */
++	entry->bfib1.ka = 0;
++	entry->bfib1.vlan_layer = 0;
++	entry->bfib1.psn = 0;
++	entry->bfib1.vpm = 0;
++	entry->bfib1.ps = 0;
 +}
 +
 +static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
@@ -265,7 +274,7 @@
  static unsigned int skb_to_hnat_info(struct sk_buff *skb,
  				     const struct net_device *dev,
  				     struct foe_entry *foe,
-@@ -1238,6 +1335,11 @@ static unsigned int skb_to_hnat_info(str
+@@ -1238,6 +1344,11 @@ static unsigned int skb_to_hnat_info(str
  	if (whnat && is_hnat_pre_filled(foe))
  		return 0;
  
@@ -277,7 +286,7 @@
  	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
  	entry.bfib1.state = foe->udib1.state;
  
-@@ -1629,6 +1732,10 @@ static unsigned int skb_to_hnat_info(str
+@@ -1633,6 +1744,10 @@ static unsigned int skb_to_hnat_info(str
  	/* Fill Layer2 Info.*/
  	entry = ppe_fill_L2_info(eth, entry, hw_path);
  
@@ -288,7 +297,7 @@
  	/* Fill Info Blk*/
  	entry = ppe_fill_info_blk(eth, entry, hw_path);
  
-@@ -1827,7 +1934,20 @@ static unsigned int skb_to_hnat_info(str
+@@ -1833,7 +1948,20 @@ static unsigned int skb_to_hnat_info(str
  			entry.ipv6_5t_route.act_dp |= UDF_HNAT_PRE_FILLED;
  	}
  
@@ -309,7 +318,7 @@
  	memcpy(foe, &entry, sizeof(entry));
  	/*reset statistic for this entry*/
  	if (hnat_priv->data->per_flow_accounting &&
-@@ -1880,6 +2000,7 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
+@@ -1886,6 +2014,7 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
  		return NF_ACCEPT;
  
  	eth = eth_hdr(skb);
@@ -317,7 +326,7 @@
  	memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
  
  	/*not bind multicast if PPE mcast not enable*/
-@@ -1899,6 +2020,12 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
+@@ -1905,6 +2034,12 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
  	switch ((int)bfib1_tx.pkt_type) {
  	case IPV4_HNAPT:
  	case IPV4_HNAT:
@@ -330,7 +339,7 @@
  		entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
  		entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
  		break;
-@@ -2060,6 +2187,10 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
+@@ -2066,6 +2201,10 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
  		entry->ipv6_5t_route.iblk2.dp = gmac_no;
  	}
  
@@ -341,7 +350,7 @@
  	bfib1_tx.ttl = 1;
  	bfib1_tx.state = BIND;
  	wmb();
-@@ -2081,6 +2212,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
+@@ -2087,6 +2226,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
  	}
  
  	skb_hnat_alg(skb) = 0;
@@ -349,7 +358,7 @@
  	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
  
  	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
-@@ -2529,6 +2661,7 @@ static unsigned int mtk_hnat_nf_post_rou
+@@ -2535,6 +2675,7 @@ static unsigned int mtk_hnat_nf_post_rou
  	struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
  						.virt_dev = (struct net_device*)out };
  	const struct net_device *arp_dev = out;
@@ -357,7 +366,7 @@
  
  	if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
  		return 0;
-@@ -2549,10 +2682,18 @@ static unsigned int mtk_hnat_nf_post_rou
+@@ -2551,10 +2692,18 @@ static unsigned int mtk_hnat_nf_post_rou
  
  	if (out->netdev_ops->ndo_flow_offload_check) {
  		out->netdev_ops->ndo_flow_offload_check(&hw_path);
@@ -376,7 +385,7 @@
  		return 0;
  
  	trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
-@@ -2572,9 +2713,18 @@ static unsigned int mtk_hnat_nf_post_rou
+@@ -2574,9 +2723,18 @@ static unsigned int mtk_hnat_nf_post_rou
  		if (fn && !mtk_hnat_accel_type(skb))
  			break;
  
@@ -396,7 +405,7 @@
  		skb_to_hnat_info(skb, out, entry, &hw_path);
  		break;
  	case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
-@@ -2845,7 +2995,7 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
+@@ -2847,7 +3005,7 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
  	if (iph->protocol == IPPROTO_IPV6) {
  		entry->udib1.pkt_type = IPV6_6RD;
  		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
diff --git a/target/linux/mediatek/patches-5.4/999-4101-mtk-tops-network-service-error-recover-support.patch b/target/linux/mediatek/patches-5.4/999-4101-mtk-tunnel-network-service-error-recover-support.patch
similarity index 96%
rename from target/linux/mediatek/patches-5.4/999-4101-mtk-tops-network-service-error-recover-support.patch
rename to target/linux/mediatek/patches-5.4/999-4101-mtk-tunnel-network-service-error-recover-support.patch
index fbb46ac..9f9e26a 100644
--- a/target/linux/mediatek/patches-5.4/999-4101-mtk-tops-network-service-error-recover-support.patch
+++ b/target/linux/mediatek/patches-5.4/999-4101-mtk-tunnel-network-service-error-recover-support.patch
@@ -38,7 +38,7 @@
  
  #define MTK_ETHTOOL_STAT(x) { #x, \
  			      offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
-@@ -4057,6 +4058,8 @@ static void mtk_pending_work(struct work
+@@ -4141,6 +4142,8 @@ static void mtk_pending_work(struct work
  				}
  			pr_warn("wait for MTK_FE_START_RESET\n");
  		}
diff --git a/target/linux/mediatek/patches-5.4/999-4103-mtk-crypto-esp-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4102-mtk-crypto-offload-support.patch
similarity index 90%
rename from target/linux/mediatek/patches-5.4/999-4103-mtk-crypto-esp-offload-support.patch
rename to target/linux/mediatek/patches-5.4/999-4102-mtk-crypto-offload-support.patch
index a77ef85..360be72 100644
--- a/target/linux/mediatek/patches-5.4/999-4103-mtk-crypto-esp-offload-support.patch
+++ b/target/linux/mediatek/patches-5.4/999-4102-mtk-crypto-offload-support.patch
@@ -10,7 +10,7 @@
  		skb_hnat_set_is_decap(skb, 0);
 --- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
 +++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-@@ -1075,6 +1075,9 @@ static unsigned int hnat_ipv4_get_nextho
+@@ -1076,6 +1076,9 @@ static unsigned int hnat_ipv4_get_nextho
  		return 0;
  	}
  
@@ -20,7 +20,7 @@
  	rcu_read_lock_bh();
  	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
  	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
-@@ -1096,7 +1099,7 @@ static unsigned int hnat_ipv4_get_nextho
+@@ -1097,7 +1100,7 @@ static unsigned int hnat_ipv4_get_nextho
  	 * outer header, we must update its outer mac header pointer
  	 * before filling outer mac or it may screw up inner mac
  	 */
@@ -29,7 +29,7 @@
  		skb_push(skb, sizeof(struct ethhdr));
  		skb_reset_mac_header(skb);
  	}
-@@ -1104,7 +1107,7 @@ static unsigned int hnat_ipv4_get_nextho
+@@ -1105,7 +1108,7 @@ static unsigned int hnat_ipv4_get_nextho
  	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
  	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
  
@@ -38,7 +38,7 @@
  		skb_pull(skb, sizeof(struct ethhdr));
  
  	rcu_read_unlock_bh();
-@@ -1289,6 +1292,9 @@ static inline void hnat_fill_offload_eng
+@@ -1299,6 +1302,9 @@ static inline void hnat_fill_offload_eng
  		 */
  		entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
  		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
@@ -48,7 +48,7 @@
  	} else {
  		return;
  	}
-@@ -1334,7 +1340,8 @@ static unsigned int skb_to_hnat_info(str
+@@ -1344,7 +1350,8 @@ static unsigned int skb_to_hnat_info(str
  	if (whnat && is_hnat_pre_filled(foe))
  		return 0;
  
@@ -58,7 +58,7 @@
  		hnat_get_filled_unbind_entry(skb, &entry);
  		goto hnat_entry_bind;
  	}
-@@ -1734,7 +1741,8 @@ static unsigned int skb_to_hnat_info(str
+@@ -1744,7 +1751,8 @@ static unsigned int skb_to_hnat_info(str
  	/* Fill Layer2 Info.*/
  	entry = ppe_fill_L2_info(eth, entry, hw_path);
  
@@ -68,7 +68,7 @@
  		goto hnat_entry_skip_bind;
  
  hnat_entry_bind:
-@@ -1938,6 +1946,8 @@ hnat_entry_bind:
+@@ -1950,6 +1958,8 @@ hnat_entry_bind:
  
  #if defined(CONFIG_MEDIATEK_NETSYS_V3)
  	hnat_fill_offload_engine_entry(skb, &entry, dev);
@@ -77,7 +77,7 @@
  #endif
  
  hnat_entry_skip_bind:
-@@ -2215,6 +2225,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
+@@ -2227,6 +2237,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
  
  	skb_hnat_alg(skb) = 0;
  	skb_hnat_set_tops(skb, 0);
@@ -85,7 +85,7 @@
  	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
  
  	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
-@@ -2301,7 +2312,8 @@ static unsigned int mtk_hnat_accel_type(
+@@ -2313,7 +2324,8 @@ static unsigned int mtk_hnat_accel_type(
  	 * is from local_out which is also filtered in sanity check.
  	 */
  	dst = skb_dst(skb);
@@ -95,7 +95,7 @@
  		return 0;
  
  	ct = nf_ct_get(skb, &ctinfo);
-@@ -2993,7 +3005,10 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
+@@ -3005,7 +3017,10 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
  	if (iph->protocol == IPPROTO_IPV6) {
  		entry->udib1.pkt_type = IPV6_6RD;
  		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
diff --git a/target/linux/mediatek/patches-5.4/999-4103-mtk-tunnel-crypto-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4103-mtk-tunnel-crypto-offload-support.patch
new file mode 100644
index 0000000..8dd7b19
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-4103-mtk-tunnel-crypto-offload-support.patch
@@ -0,0 +1,214 @@
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+@@ -1091,6 +1091,8 @@ enum FoeIpAct {
+ #define NR_EIP197_QDMA_TPORT 3
+ #define NR_TDMA_TPORT 4
+ #define NR_TDMA_QDMA_TPORT 5
++#define NR_TDMA_EIP197_TPORT 8
++#define NR_TDMA_EIP197_QDMA_TPORT 9
+ #define LAN_DEV_NAME hnat_priv->lan
+ #define LAN2_DEV_NAME hnat_priv->lan2
+ #define IS_WAN(dev)                                                            \
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+@@ -1100,7 +1100,8 @@ static unsigned int hnat_ipv4_get_nextho
+ 	 * outer header, we must update its outer mac header pointer
+ 	 * before filling outer mac or it may screw up inner mac
+ 	 */
+-	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) || skb_hnat_cdrt(skb)) {
++	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
++	    || (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb))) {
+ 		skb_push(skb, sizeof(struct ethhdr));
+ 		skb_reset_mac_header(skb);
+ 	}
+@@ -1108,7 +1109,8 @@ static unsigned int hnat_ipv4_get_nextho
+ 	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
+ 	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
+ 
+-	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) || skb_hnat_cdrt(skb))
++	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
++	    || (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)))
+ 		skb_pull(skb, sizeof(struct ethhdr));
+ 
+ 	rcu_read_unlock_bh();
+@@ -1256,6 +1258,38 @@ static inline void hnat_get_filled_unbin
+ 	entry->bfib1.ps = 0;
+ }
+ 
++/*
++ * check offload engine data is prepared
++ * return 0 for packets not related to offload engine
++ * return positive value for offload engine prepared data done
++ * return negative value for data is still constructing
++ */
++static inline int hnat_offload_engine_done(struct sk_buff *skb,
++					   struct flow_offload_hw_path *hw_path)
++{
++	struct dst_entry *dst = skb_dst(skb);
++
++	if ((skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL))) {
++		/* tunnel encap'ed */
++		if (dst && dst_xfrm(dst))
++			/*
++			 * skb not ready to bind since it is still needs
++			 * to be encrypted
++			 */
++			return -1;
++
++		/* nothing need to be done further for this skb */
++		return 1;
++	}
++
++	if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb) && dst && !dst_xfrm(dst))
++		/* crypto encrypted done */
++		return 1;
++
++	/* no need for tunnel encapsulation or crypto encryption */
++	return 0;
++}
++
+ static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
+ {
+ 	u32 cfg;
+@@ -1300,9 +1334,15 @@ static inline void hnat_fill_offload_eng
+ 		 * we fill in hnat tport and tops_entry for tunnel encapsulation
+ 		 * offloading
+ 		 */
+-		entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
++		if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)) {
++			entry->ipv4_hnapt.tport_id = NR_TDMA_EIP197_QDMA_TPORT;
++			entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
++		} else {
++			entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
++		}
+ 		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
+-	} else if (skb_hnat_cdrt(skb)) {
++
++	} else if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)) {
+ 		entry->ipv4_hnapt.tport_id = NR_EIP197_QDMA_TPORT;
+ 		entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
+ 	} else {
+@@ -1334,6 +1374,7 @@ static unsigned int skb_to_hnat_info(str
+ 	u32 port_id = 0;
+ 	u32 payload_len = 0;
+ 	int mape = 0;
++	int ret;
+ 
+ 	ct = nf_ct_get(skb, &ctinfo);
+ 
+@@ -1350,10 +1391,12 @@ static unsigned int skb_to_hnat_info(str
+ 	if (whnat && is_hnat_pre_filled(foe))
+ 		return 0;
+ 
+-	if ((skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL))
+-	    || (skb_hnat_cdrt(skb) && skb_dst(skb) && !dst_xfrm(skb_dst(skb)))) {
++	ret = hnat_offload_engine_done(skb, hw_path);
++	if (ret == 1) {
+ 		hnat_get_filled_unbind_entry(skb, &entry);
+ 		goto hnat_entry_bind;
++	} else if (ret == -1) {
++		return 0;
+ 	}
+ 
+ 	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
+@@ -1752,7 +1795,8 @@ static unsigned int skb_to_hnat_info(str
+ 	entry = ppe_fill_L2_info(eth, entry, hw_path);
+ 
+ 	if ((skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
+-	    || (!skb_hnat_cdrt(skb) && skb_dst(skb) && dst_xfrm(skb_dst(skb))))
++	    || (!skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)
++	        && skb_dst(skb) && dst_xfrm(skb_dst(skb))))
+ 		goto hnat_entry_skip_bind;
+ 
+ hnat_entry_bind:
+@@ -1958,7 +2002,7 @@ hnat_entry_bind:
+ 
+ #if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ 	hnat_fill_offload_engine_entry(skb, &entry, dev);
+-	if (skb_hnat_cdrt(skb))
++	if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb))
+ 		entry = ppe_fill_L2_info(eth, entry, hw_path);
+ #endif
+ 
+@@ -2238,6 +2282,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
+ 	skb_hnat_alg(skb) = 0;
+ 	skb_hnat_set_tops(skb, 0);
+ 	skb_hnat_set_cdrt(skb, 0);
++	skb_hnat_set_is_decrypt(skb, 0);
+ 	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+ 
+ 	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
+@@ -3018,7 +3063,8 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
+ 		entry->udib1.pkt_type = IPV6_6RD;
+ 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+ 	} else if (is_magic_tag_valid(skb)
+-		   && (skb_hnat_cdrt(skb) || skb_hnat_tops(skb))) {
++		   && ((skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb))
++		       || skb_hnat_tops(skb))) {
+ 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+ 	} else {
+ 		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2316,11 +2316,14 @@ static int mtk_poll_rx(struct napi_struc
+ 
+ 		skb_hnat_alg(skb) = 0;
+ 		skb_hnat_filled(skb) = 0;
+-		skb_hnat_set_cdrt(skb, 0);
++		skb_hnat_set_cdrt(skb, RX_DMA_GET_CDRT(trxd.rxd7));
+ 		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+ 		skb_hnat_set_tops(skb, 0);
+ 		skb_hnat_set_is_decap(skb, 0);
+ 
++		if (skb_hnat_cdrt(skb))
++			skb_hnat_set_is_decrypt(skb, 1);
++
+ 		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
+ 			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
+ 				     __func__, skb_hnat_reason(skb));
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -640,6 +640,9 @@
+ #define RX_DMA_GET_AGG_CNT_V2(_x)	(((_x) >> 16) & 0xff)
+ #define RX_DMA_GET_TOPS_CRSN(_x)	(((_x) >> 24) & 0xff)
+ 
++/* PDMA V2 descriptor rxd7 */
++#define RX_DMA_GET_CDRT(_x)		(((_x) >> 8) & 0xff)
++
+ /* PHY Polling and SMI Master Control registers */
+ #define MTK_PPSC		0x10000
+ #define PPSC_MDC_CFG		GENMASK(29, 24)
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+@@ -47,7 +47,8 @@ struct hnat_desc {
+ 	u32 tops : 6;
+ 	u32 is_decap : 1;
+ 	u32 cdrt : 8;
+-	u32 resv3 : 4;
++	u32 is_decrypt : 1;
++	u32 resv3 : 3;
+ 	u32 magic_tag_protect : 16;
+ } __packed;
+ #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
+@@ -101,7 +102,10 @@ struct hnat_desc {
+ #define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
+ #define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
+ #define skb_hnat_cdrt(skb) (((struct hnat_desc *)((skb)->head))->cdrt)
++#define skb_hnat_is_decrypt(skb) (((struct hnat_desc *)((skb)->head))->is_decrypt)
++#define skb_hnat_is_encrypt(skb) (!skb_hnat_is_decrypt(skb))
+ #define skb_hnat_set_cdrt(skb, cdrt) ((skb_hnat_cdrt(skb)) = (cdrt))
++#define skb_hnat_set_is_decrypt(skb, is_dec) ((skb_hnat_is_decrypt(skb)) = is_dec)
+ #else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
+ #define skb_hnat_tops(skb) (0)
+ #define skb_hnat_is_decap(skb) (0)
+@@ -109,7 +113,10 @@ struct hnat_desc {
+ #define skb_hnat_set_tops(skb, tops)
+ #define skb_hnat_set_is_decap(skb, is_decap)
+ #define skb_hnat_cdrt(skb) (0)
++#define skb_hnat_is_decrypt(skb) (0)
++#define skb_hnat_is_encrypt(skb) (0)
+ #define skb_hnat_set_cdrt(skb, cdrt)
++#define skb_hnat_set_is_decrypt(skb, is_dec)
+ #endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
+ #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
+ #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
diff --git a/target/linux/mediatek/patches-5.4/999-4500-mtk-tops-gre-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4500-mtk-gre-offload-support.patch
similarity index 100%
rename from target/linux/mediatek/patches-5.4/999-4500-mtk-tops-gre-offload-support.patch
rename to target/linux/mediatek/patches-5.4/999-4500-mtk-gre-offload-support.patch
diff --git a/target/linux/mediatek/patches-5.4/999-4500-mtk-tops-l2tp-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4500-mtk-l2tp-offload-support.patch
similarity index 100%
rename from target/linux/mediatek/patches-5.4/999-4500-mtk-tops-l2tp-offload-support.patch
rename to target/linux/mediatek/patches-5.4/999-4500-mtk-l2tp-offload-support.patch