[][openwrt][mt7988][crypto][EIP197 Alpha Release]

[Description]
Add alpha version of EIP197 package (crypto-eip)

[Release-log]
N/A

Change-Id: Ib90915f531aa238b90bd0fecc81e2ec3bf6016cc
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/7908928
diff --git a/package-21.02/kernel/crypto-eip/Config.in b/package-21.02/kernel/crypto-eip/Config.in
new file mode 100644
index 0000000..c68eb36
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/Config.in
@@ -0,0 +1,30 @@
+menu "Crypto Offload Configuration"
+	depends on PACKAGE_kmod-crypto-eip
+
+choice
+	prompt "Crypto Offload Mode"
+	default CRYPTO_OFFLOAD_INLINE
+	help
+	  Select the crypto offload mode. Look-aside mode or inline mode is supported.
+
+config CRYPTO_OFFLOAD_INLINE
+	bool "Inline Mode"
+	depends on TARGET_mediatek_mt7988 && !PACKAGE_kmod-eip197_driver
+	select PACKAGE_kmod-crypto-eip-inline
+	select PACKAGE_kmod-crypto-eip-ddk
+	select PACKAGE_crypto-eip-inline-fw
+endchoice
+
+choice
+	prompt "Crypto XFRM Offload Method"
+	depends on CRYPTO_OFFLOAD_INLINE
+	default CRYPTO_XFRM_OFFLOAD_MTK_PCE
+	help
+	  Select crypto XFRM offload method. Either RAMBUS PCL support or
+	  MTK PCE support.
+
+config CRYPTO_XFRM_OFFLOAD_MTK_PCE
+	bool "Enable MTK PCE XFRM Offload"
+endchoice
+
+endmenu
diff --git a/package-21.02/kernel/crypto-eip/Makefile b/package-21.02/kernel/crypto-eip/Makefile
new file mode 100644
index 0000000..494eadb
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2023 MediaTek Inc.
+#
+# Author: Chris.Chou <chris.chou@mediatek.com>
+#         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+
+include $(TOPDIR)/rules.mk
+include $(INCLUDE_DIR)/kernel.mk
+
+PKG_NAME:=crypto-eip
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/package-defaults.mk
+
+# For package initialization such as menuconfig or description etc.
+EIP_KERNEL_PKGS:=
+EIP_PKGS:=
+
+include crypto-eip.mk
+include $(filter-out crypto-eip.mk,$(wildcard *.mk))
+
+EXTRA_CFLAGS+= \
+	$(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=m,%,$(filter %=m,$(EXTRA_KCONFIG)))) \
+	$(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=y,%,$(filter %=y,$(EXTRA_KCONFIG))))
+
+EXTRA_CFLAGS+= -Wall -Werror
+
+define Build/Prepare
+	mkdir -p $(PKG_BUILD_DIR)/firmware
+	$(CP) -r `find ./firmware -maxdepth 1 | grep -v ".git"` $(PKG_BUILD_DIR)/firmware/
+	$(CP) -r `find ./src -maxdepth 1 | grep -v ".git"` $(PKG_BUILD_DIR)/
+endef
+
+define Build/Compile
+	$(MAKE) -C "$(LINUX_DIR)" \
+		$(KERNEL_MAKE_FLAGS) \
+		M="$(PKG_BUILD_DIR)" \
+		EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
+		$(EXTRA_KCONFIG) \
+		modules
+endef
+
+$(foreach KERNEL_PKG,$(EIP_KERNEL_PKGS),$(eval $(call KernelPackage,$(KERNEL_PKG))))
+$(foreach PKG,$(EIP_PKGS),$(eval $(call BuildPackage,$(PKG))))
diff --git a/package-21.02/kernel/crypto-eip/crypto-eip-ddk.mk b/package-21.02/kernel/crypto-eip/crypto-eip-ddk.mk
new file mode 100644
index 0000000..bd6f30a
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/crypto-eip-ddk.mk
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2023 MediaTek Inc.
+#
+# Author: Chris.Chou <chris.chou@mediatek.com>
+#         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+
+# Configure for crypto-eip DDK makefile
+EIP_KERNEL_PKGS+= \
+	crypto-eip-ddk
+
+ifeq ($(CONFIG_PACKAGE_kmod-crypto-eip-ddk),y)
+EXTRA_KCONFIG+= \
+	CONFIG_RAMBUS_DDK=m
+
+EXTRA_CFLAGS+= \
+	-I$(PKG_BUILD_DIR)/ddk/inc \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/configs \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/device \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/device/lkm \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/device/lkm/of \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/dmares \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/firmware_api \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/kit/builder/sa \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/kit/builder/token \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/kit/eip197 \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/kit/iotoken \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/kit/list \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/kit/ring \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/libc \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/log \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/shdevxs \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/slad \
+	-I$(PKG_BUILD_DIR)/ddk/inc/crypto-eip/ddk/slad/lkm \
+	-DEIP197_BUS_VERSION_AXI3 \
+	-DDRIVER_64BIT_HOST \
+	-DDRIVER_64BIT_DEVICE \
+	-DADAPTER_AUTO_TOKENBUILDER
+endif
+
+# crypto-eip-ddk kernel package configuration
+define KernelPackage/crypto-eip-ddk
+  CATEGORY:=MTK Properties
+  SUBMENU:=Drivers
+  TITLE:= MTK EIP DDK
+  FILES+=$(PKG_BUILD_DIR)/ddk/crypto-eip-ddk.ko
+  DEPENDS:= \
+	@CRYPTO_OFFLOAD_INLINE \
+	kmod-crypto-eip
+endef
+
+define KernelPackage/crypto-eip-ddk/description
+  Porting DDK source code to package.
+endef
diff --git a/package-21.02/kernel/crypto-eip/crypto-eip-fw.mk b/package-21.02/kernel/crypto-eip/crypto-eip-fw.mk
new file mode 100644
index 0000000..eabefae
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/crypto-eip-fw.mk
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2023 MediaTek Inc.
+#
+# Author: Chris.Chou <chris.chou@mediatek.com>
+#         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+
+# Configure for crypto firmware makefile
+EIP_PKGS+= \
+	crypto-eip-inline-fw
+
+define Package/crypto-eip-inline-fw
+  TITLE:=Mediatek EIP Firmware
+  SECTION:=firmware
+  CATEGORY:=Firmware
+  DEPENDS:=@CRYPTO_OFFLOAD_INLINE
+endef
+
+define Package/crypto-eip-inline-fw/description
+  Load firmware for EIP197 inline mode.
+endef
+
+define Package/crypto-eip-inline-fw/install
+	$(INSTALL_DIR) $(1)/lib/firmware/
+	$(CP) \
+		$(PKG_BUILD_DIR)/firmware/bin/firmware_eip207_ifpp.bin \
+		$(PKG_BUILD_DIR)/firmware/bin/firmware_eip207_ipue.bin \
+		$(PKG_BUILD_DIR)/firmware/bin/firmware_eip207_ofpp.bin \
+		$(PKG_BUILD_DIR)/firmware/bin/firmware_eip207_opue.bin \
+		$(1)/lib/firmware/
+endef
diff --git a/package-21.02/kernel/crypto-eip/crypto-eip.mk b/package-21.02/kernel/crypto-eip/crypto-eip.mk
new file mode 100644
index 0000000..31df9bd
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/crypto-eip.mk
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2023 MediaTek Inc.
+#
+# Author: Chris.Chou <chris.chou@mediatek.com>
+#         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+
+# Configure for crypto-eip top makefile
+EIP_KERNEL_PKGS+= \
+	crypto-eip \
+	crypto-eip-inline
+
+ifeq ($(CONFIG_PACKAGE_kmod-crypto-eip),y)
+EXTRA_KCONFIG+= \
+	CONFIG_CRYPTO_OFFLOAD_INLINE=$(CONFIG_CRYPTO_OFFLOAD_INLINE)
+endif
+
+ifeq ($(CONFIG_CRYPTO_OFFLOAD_INLINE),y)
+EXTRA_KCONFIG+= \
+	CONFIG_MTK_CRYPTO_EIP_INLINE=m \
+	CONFIG_CRYPTO_XFRM_OFFLOAD_MTK_PCE=$(CONFIG_CRYPTO_XFRM_OFFLOAD_MTK_PCE)
+
+EXTRA_CFLAGS+= \
+	-I$(LINUX_DIR)/drivers/net/ethernet/mediatek/ \
+	-I$(KERNEL_BUILD_DIR)/pce/inc/
+endif
+
+# crypto-eip kernel package configuration
+define KernelPackage/crypto-eip
+  CATEGORY:=MTK Properties
+  SUBMENU:=Drivers
+  TITLE:= EIP-197 Crypto Engine Driver
+  DEFAULT:=y
+  KCONFIG:= \
+	CONFIG_CRYPTO_HW=y \
+	CONFIG_CRYPTO_AUTHENC=y \
+	CONFIG_CRYPTO_AES=y \
+	CONFIG_CRYPTO_AEAD=y \
+	CONFIG_CRYPTO_DES=y \
+	CONFIG_CRYPTO_MD5=y \
+	CONFIG_CRYPTO_SHA1=y \
+	CONFIG_CRYPTO_SHA256=y \
+	CONFIG_CRYPTO_SHA512=y \
+	CONFIG_CRYPTO_SHA3=y \
+	CONFIG_CRYPTO_HMAC=y \
+	CONFIG_INET_ESP=y
+  DEPENDS:= \
+	@TARGET_mediatek \
+	+strongswan \
+	+strongswan-default
+  $(call AddDepends/crypto)
+endef
+
+define KernelPackage/crypto-eip/description
+  Enable the crypto engine to accelerate encryption/decryption. Supports
+  look-aside mode (semi-HW) and inline mode (pure-HW). Look-aside mode is
+  bound to the Linux Crypto API and supports the AES, DES, SHA1, and SHA2
+  algorithms. Inline mode only supports ESP Tunnel mode (single tunnel) now.
+endef
+
+define KernelPackage/crypto-eip/config
+	source "$(SOURCE)/Config.in"
+endef
+
+define KernelPackage/crypto-eip-inline
+  CATEGORY:=MTK Properties
+  SUBMENU:=Drivers
+  TITLE:= EIP-197 Crypto Engine Inline Driver
+  KCONFIG:= \
+	CONFIG_INET_ESP_OFFLOAD=y
+  DEPENDS:= \
+	@CRYPTO_OFFLOAD_INLINE \
+	kmod-crypto-eip \
+	kmod-crypto-eip-ddk \
+	+kmod-pce
+  FILES:=$(PKG_BUILD_DIR)/crypto-eip-inline.ko
+  $(call AddDepends/crypto)
+endef
+
+define KernelPackage/crypto-eip-inline/description
+  EIP197 inline mode. HW offload for IPsec ESP Tunnel mode.
+endef
diff --git a/package-21.02/kernel/crypto-eip/src/Kconfig b/package-21.02/kernel/crypto-eip/src/Kconfig
new file mode 100644
index 0000000..5dbbc5f
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/Kconfig
@@ -0,0 +1,8 @@
+config MTK_CRYPTO_EIP_INLINE
+	tristate "Mediatek Crypto EIP Inline"
+	help
+	  Support for the MediaTek driver that controls the inline crypto HW
+	  offload engine EIP197. The HW offload engine supports IPsec ESP
+	  inline offload. Currently it only supports ESP Tunnel and Transport
+	  mode. The driver cooperates with the XFRM offload framework and the
+	  MediaTek HNAT driver to offload the ESP protocol.
diff --git a/package-21.02/kernel/crypto-eip/src/Makefile b/package-21.02/kernel/crypto-eip/src/Makefile
new file mode 100644
index 0000000..6beba79
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2023 MediaTek Inc.
+#
+# Author: Chris.Chou <chris.chou@mediatek.com>
+#         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+
+obj-$(CONFIG_MTK_CRYPTO_EIP_INLINE) += crypto-eip-inline.o
+
+ccflags-y += -I$(src)/inc
+
+crypto-eip-inline-y += init.o
+crypto-eip-inline-y += ddk-wrapper.o
+
+crypto-eip-inline-$(CONFIG_CRYPTO_XFRM_OFFLOAD_MTK_PCE) += xfrm-offload.o
+
+include $(wildcard $(src)/*.mk)
diff --git a/package-21.02/kernel/crypto-eip/src/ddk-wrapper.c b/package-21.02/kernel/crypto-eip/src/ddk-wrapper.c
new file mode 100644
index 0000000..d923693
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/ddk-wrapper.c
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ *
+ * Author: Chris.Chou <chris.chou@mediatek.com>
+ *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/hmac.h>
+#include <crypto/md5.h>
+#include <linux/delay.h>
+
+#include <crypto-eip/ddk/slad/api_pcl.h>
+#include <crypto-eip/ddk/slad/api_pcl_dtl.h>
+#include <crypto-eip/ddk/slad/api_pec.h>
+#include <crypto-eip/ddk/slad/api_driver197_init.h>
+
+#include "crypto-eip/crypto-eip.h"
+#include "crypto-eip/ddk-wrapper.h"
+#include "crypto-eip/internal.h"
+#include "crypto-eip/crypto-eip197-inline-ddk.h"
+
+static bool crypto_iotoken_create(IOToken_Input_Dscr_t * const dscr_p,
+				  void * const ext_p, u32 *data_p,
+				  PEC_CommandDescriptor_t * const pec_cmd_dscr)
+{
+	int IOTokenRc;
+
+	dscr_p->InPacket_ByteCount = pec_cmd_dscr->SrcPkt_ByteCount;
+	dscr_p->Ext_p = ext_p;
+
+	IOTokenRc = IOToken_Create(dscr_p, data_p);
+	if (IOTokenRc < 0) {
+		CRYPTO_ERR("IOToken_Create error %d\n", IOTokenRc);
+		return false;
+	}
+
+	pec_cmd_dscr->InputToken_p = data_p;
+
+	return true;
+}
+
+unsigned int crypto_pe_get_one(IOToken_Output_Dscr_t *const OutTokenDscr_p,
+			       u32 *OutTokenData_p,
+			       PEC_ResultDescriptor_t *RD_p)
+{
+	int LoopCounter = MTK_EIP197_INLINE_NOF_TRIES;
+	int IOToken_Rc;
+	PEC_Status_t pecres;
+
+	ZEROINIT(*OutTokenDscr_p);
+	ZEROINIT(*RD_p);
+
+	/* Link data structures */
+	RD_p->OutputToken_p = OutTokenData_p;
+
+	while (LoopCounter > 0) {
+		/* Try to get the processed packet from the driver */
+		unsigned int Counter = 0;
+
+		pecres = PEC_Packet_Get(PEC_INTERFACE_ID, RD_p, 1, &Counter);
+		if (pecres != PEC_STATUS_OK) {
+			/* IO error */
+			CRYPTO_ERR("PEC_Packet_Get error %d\n", pecres);
+			return 0;
+		}
+
+		if (Counter) {
+			IOToken_Rc = IOToken_Parse(OutTokenData_p, OutTokenDscr_p);
+			if (IOToken_Rc < 0) {
+				/* IO error */
+				CRYPTO_ERR("IOToken_Parse error %d\n", IOToken_Rc);
+				return 0;
+			}
+
+			if (OutTokenDscr_p->ErrorCode != 0) {
+				/* Packet process error */
+				CRYPTO_ERR("Result descriptor error 0x%x\n",
+					OutTokenDscr_p->ErrorCode);
+				return 0;
+			}
+
+			/* packet received */
+			return Counter;
+		}
+
+		/* Wait for MTK_EIP197_PKT_GET_TIMEOUT_MS milliseconds */
+		udelay(MTK_EIP197_PKT_GET_TIMEOUT_MS * 1000);
+		LoopCounter--;
+	}
+
+	CRYPTO_ERR("Timeout when reading packet\n");
+
+	/* IO error (timeout, not result packet received) */
+	return 0;
+}
+
+
+bool crypto_basic_hash(SABuilder_Auth_t HashAlgo, uint8_t *Input_p,
+				unsigned int InputByteCount, uint8_t *Output_p,
+				unsigned int OutputByteCount, bool fFinalize)
+{
+	SABuilder_Params_Basic_t ProtocolParams;
+	SABuilder_Params_t params;
+	unsigned int SAWords = 0;
+	static uint8_t DummyAuthKey[64];
+	int rc;
+
+	DMABuf_Properties_t DMAProperties = {0, 0, 0, 0};
+	DMABuf_HostAddress_t TokenHostAddress;
+	DMABuf_HostAddress_t PktHostAddress;
+	DMABuf_HostAddress_t SAHostAddress;
+	DMABuf_Status_t DMAStatus;
+
+	DMABuf_Handle_t TokenHandle = {0};
+	DMABuf_Handle_t PktHandle = {0};
+	DMABuf_Handle_t SAHandle = {0};
+
+	unsigned int TokenMaxWords = 0;
+	unsigned int TokenHeaderWord;
+	unsigned int TokenWords = 0;
+	unsigned int TCRWords = 0;
+	void *TCRData = 0;
+
+	TokenBuilder_Params_t TokenParams;
+	PEC_CommandDescriptor_t Cmd;
+	PEC_ResultDescriptor_t Res;
+	unsigned int count;
+
+	u32 OutputToken[IOTOKEN_IN_WORD_COUNT];
+	u32 InputToken[IOTOKEN_IN_WORD_COUNT];
+	IOToken_Output_Dscr_t OutTokenDscr;
+	IOToken_Input_Dscr_t InTokenDscr;
+	void *InTokenDscrExt_p = NULL;
+
+#ifdef CRYPTO_IOTOKEN_EXT
+	IOToken_Input_Dscr_Ext_t InTokenDscrExt;
+
+	ZEROINIT(InTokenDscrExt);
+	InTokenDscrExt_p = &InTokenDscrExt;
+#endif
+	ZEROINIT(InTokenDscr);
+	ZEROINIT(OutTokenDscr);
+
+	rc = SABuilder_Init_Basic(&params, &ProtocolParams, SAB_DIRECTION_OUTBOUND);
+	if (rc) {
+		CRYPTO_ERR("SABuilder_Init_Basic failed: %d\n", rc);
+		goto error_exit;
+	}
+
+	params.AuthAlgo = HashAlgo;
+	params.AuthKey1_p = DummyAuthKey;
+
+	if (!fFinalize)
+		params.flags |= SAB_FLAG_HASH_SAVE | SAB_FLAG_HASH_INTERMEDIATE;
+	params.flags |= SAB_FLAG_SUPPRESS_PAYLOAD;
+	ProtocolParams.ICVByteCount = OutputByteCount;
+
+	rc = SABuilder_GetSizes(&params, &SAWords, NULL, NULL);
+	if (rc) {
+		CRYPTO_ERR("SA not created because of size errors: %d\n", rc);
+		goto error_exit;
+	}
+
+	DMAProperties.fCached = true;
+	DMAProperties.Alignment = MTK_EIP197_INLINE_DMA_ALIGNMENT_BYTE_COUNT;
+	DMAProperties.Bank = MTK_EIP197_INLINE_BANK_TRANSFORM;
+	DMAProperties.Size = MAX(4*SAWords, 256);
+
+	DMAStatus = DMABuf_Alloc(DMAProperties, &SAHostAddress, &SAHandle);
+	if (DMAStatus != DMABUF_STATUS_OK) {
+		rc = 1;
+		CRYPTO_ERR("Allocation of SA failed: %d\n", DMAStatus);
+		goto error_exit;
+	}
+
+	rc = SABuilder_BuildSA(&params, (u32 *)SAHostAddress.p, NULL, NULL);
+	if (rc) {
+		CRYPTO_ERR("SA not created because of errors: %d\n", rc);
+		goto error_exit;
+	}
+
+	rc = TokenBuilder_GetContextSize(&params, &TCRWords);
+	if (rc) {
+		CRYPTO_ERR("TokenBuilder_GetContextSize returned errors: %d\n", rc);
+		goto error_exit;
+	}
+
+	TCRData = kmalloc(4 * TCRWords, GFP_KERNEL);
+	if (!TCRData) {
+		rc = 1;
+		CRYPTO_ERR("Allocation of TCR failed\n");
+		goto error_exit;
+	}
+
+	rc = TokenBuilder_BuildContext(&params, TCRData);
+	if (rc) {
+		CRYPTO_ERR("TokenBuilder_BuildContext failed: %d\n", rc);
+		goto error_exit;
+	}
+
+	rc = TokenBuilder_GetSize(TCRData, &TokenMaxWords);
+	if (rc) {
+		CRYPTO_ERR("TokenBuilder_GetSize failed: %d\n", rc);
+		goto error_exit;
+	}
+
+	DMAProperties.fCached = true;
+	DMAProperties.Alignment = MTK_EIP197_INLINE_DMA_ALIGNMENT_BYTE_COUNT;
+	DMAProperties.Bank = MTK_EIP197_INLINE_BANK_TOKEN;
+	DMAProperties.Size = 4*TokenMaxWords;
+
+	DMAStatus = DMABuf_Alloc(DMAProperties, &TokenHostAddress, &TokenHandle);
+	if (DMAStatus != DMABUF_STATUS_OK) {
+		rc = 1;
+		CRYPTO_ERR("Allocation of token builder failed: %d\n", DMAStatus);
+		goto error_exit;
+	}
+
+	DMAProperties.fCached = true;
+	DMAProperties.Alignment = MTK_EIP197_INLINE_DMA_ALIGNMENT_BYTE_COUNT;
+	DMAProperties.Bank = MTK_EIP197_INLINE_BANK_PACKET;
+	DMAProperties.Size = MAX(InputByteCount, OutputByteCount);
+
+	DMAStatus = DMABuf_Alloc(DMAProperties, &PktHostAddress, &PktHandle);
+	if (DMAStatus != DMABUF_STATUS_OK) {
+		rc = 1;
+		CRYPTO_ERR("Allocation of source packet buffer failed: %d\n",
+			   DMAStatus);
+		goto error_exit;
+	}
+
+	rc = PEC_SA_Register(PEC_INTERFACE_ID, SAHandle, DMABuf_NULLHandle,
+				DMABuf_NULLHandle);
+	if (rc != PEC_STATUS_OK) {
+		CRYPTO_ERR("PEC_SA_Register failed: %d\n", rc);
+		goto error_exit;
+	}
+
+	memcpy(PktHostAddress.p, Input_p, InputByteCount);
+
+	ZEROINIT(TokenParams);
+	TokenParams.PacketFlags |= (TKB_PACKET_FLAG_HASHFIRST
+				    | TKB_PACKET_FLAG_HASHAPPEND);
+	if (fFinalize)
+		TokenParams.PacketFlags |= TKB_PACKET_FLAG_HASHFINAL;
+
+	rc = TokenBuilder_BuildToken(TCRData, (u8 *) PktHostAddress.p,
+				     InputByteCount, &TokenParams,
+				     (u32 *) TokenHostAddress.p,
+				     &TokenWords, &TokenHeaderWord);
+	if (rc != TKB_STATUS_OK) {
+		CRYPTO_ERR("Token builder failed: %d\n", rc);
+		goto error_exit_unregister;
+	}
+
+	ZEROINIT(Cmd);
+	Cmd.Token_Handle = TokenHandle;
+	Cmd.Token_WordCount = TokenWords;
+	Cmd.SrcPkt_Handle = PktHandle;
+	Cmd.SrcPkt_ByteCount = InputByteCount;
+	Cmd.DstPkt_Handle = PktHandle;
+	Cmd.SA_Handle1 = SAHandle;
+	Cmd.SA_Handle2 = DMABuf_NULLHandle;
+
+
+#if defined(CRYPTO_IOTOKEN_EXT)
+	InTokenDscrExt.HW_Services  = IOTOKEN_CMD_PKT_LAC;
+#endif
+	InTokenDscr.TknHdrWordInit = TokenHeaderWord;
+
+	if (!crypto_iotoken_create(&InTokenDscr,
+				   InTokenDscrExt_p,
+				   InputToken,
+				   &Cmd)) {
+		rc = 1;
+		goto error_exit_unregister;
+	}
+
+	rc = PEC_Packet_Put(PEC_INTERFACE_ID, &Cmd, 1, &count);
+	if (rc != PEC_STATUS_OK && count != 1) {
+		rc = 1;
+		CRYPTO_ERR("PEC_Packet_Put error: %d\n", rc);
+		goto error_exit_unregister;
+	}
+
+	if (crypto_pe_get_one(&OutTokenDscr, OutputToken, &Res) < 1) {
+		rc = 1;
+		CRYPTO_ERR("error from crypto_pe_get_one\n");
+		goto error_exit_unregister;
+	}
+	memcpy(Output_p, PktHostAddress.p, OutputByteCount);
+
+error_exit_unregister:
+	PEC_SA_UnRegister(PEC_INTERFACE_ID, SAHandle, DMABuf_NULLHandle,
+				DMABuf_NULLHandle);
+
+error_exit:
+	DMABuf_Release(SAHandle);
+	DMABuf_Release(TokenHandle);
+	DMABuf_Release(PktHandle);
+
+	if (TCRData != NULL)
+		kfree(TCRData);
+
+	return rc == 0;
+}
+
+bool crypto_hmac_precompute(SABuilder_Auth_t AuthAlgo,
+			    uint8_t *AuthKey_p,
+			    unsigned int AuthKeyByteCount,
+			    uint8_t *Inner_p,
+			    uint8_t *Outer_p)
+{
+	SABuilder_Auth_t HashAlgo;
+	unsigned int blocksize, hashsize, digestsize;
+	static uint8_t pad_block[128], hashed_key[128];
+	unsigned int i;
+
+	switch (AuthAlgo) {
+	case SAB_AUTH_HMAC_MD5:
+		HashAlgo = SAB_AUTH_HASH_MD5;
+		blocksize = 64;
+		hashsize = 16;
+		digestsize = 16;
+		break;
+	case SAB_AUTH_HMAC_SHA1:
+		HashAlgo = SAB_AUTH_HASH_SHA1;
+		blocksize = 64;
+		hashsize = 20;
+		digestsize = 20;
+		break;
+	case SAB_AUTH_HMAC_SHA2_224:
+		HashAlgo = SAB_AUTH_HASH_SHA2_224;
+		blocksize = 64;
+		hashsize = 28;
+		digestsize = 32;
+		break;
+	case SAB_AUTH_HMAC_SHA2_256:
+		HashAlgo = SAB_AUTH_HASH_SHA2_256;
+		blocksize = 64;
+		hashsize = 32;
+		digestsize = 32;
+		break;
+	case SAB_AUTH_HMAC_SHA2_384:
+		HashAlgo = SAB_AUTH_HASH_SHA2_384;
+		blocksize = 128;
+		hashsize = 48;
+		digestsize = 64;
+		break;
+	case SAB_AUTH_HMAC_SHA2_512:
+		HashAlgo = SAB_AUTH_HASH_SHA2_512;
+		blocksize = 128;
+		hashsize = 64;
+		digestsize = 64;
+		break;
+	default:
+		CRYPTO_ERR("Unknown HMAC algorithm\n");
+		return false;
+	}
+
+	memset(hashed_key, 0, blocksize);
+	if (AuthKeyByteCount <= blocksize) {
+		memcpy(hashed_key, AuthKey_p, AuthKeyByteCount);
+	} else {
+		if (!crypto_basic_hash(HashAlgo, AuthKey_p, AuthKeyByteCount,
+				       hashed_key, hashsize, true))
+			return false;
+	}
+
+	for (i = 0; i < blocksize; i++)
+		pad_block[i] = hashed_key[i] ^ 0x36;
+
+	if (!crypto_basic_hash(HashAlgo, pad_block, blocksize,
+			       Inner_p, digestsize, false))
+		return false;
+
+	for (i = 0; i < blocksize; i++)
+		pad_block[i] = hashed_key[i] ^ 0x5c;
+
+	if (!crypto_basic_hash(HashAlgo, pad_block, blocksize,
+			       Outer_p, digestsize, false))
+		return false;
+
+	return true;
+}
+
+static SABuilder_Crypto_t set_crypto_algo(struct xfrm_algo *ealg)
+{
+	if (strcmp(ealg->alg_name, "cbc(des)") == 0)
+		return SAB_CRYPTO_DES;
+	else if (strcmp(ealg->alg_name, "cbc(aes)") == 0)
+		return SAB_CRYPTO_AES;
+	else if (strcmp(ealg->alg_name, "cbc(des3_ede)") == 0)
+		return SAB_CRYPTO_3DES;
+
+	return SAB_CRYPTO_NULL;
+}
+
+static bool set_auth_algo(struct xfrm_algo_auth *aalg, SABuilder_Params_t *params,
+			  uint8_t *inner, uint8_t *outer)
+{
+	if (strcmp(aalg->alg_name, "hmac(sha1)") == 0) {
+		params->AuthAlgo = SAB_AUTH_HMAC_SHA1;
+		inner = kcalloc(SHA1_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		outer = kcalloc(SHA1_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		crypto_hmac_precompute(SAB_AUTH_HMAC_SHA1, &aalg->alg_key[0],
+					aalg->alg_key_len / 8, inner, outer);
+
+		params->AuthKey1_p = inner;
+		params->AuthKey2_p = outer;
+	} else if (strcmp(aalg->alg_name, "hmac(sha256)") == 0) {
+		params->AuthAlgo = SAB_AUTH_HMAC_SHA2_256;
+		inner = kcalloc(SHA256_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		outer = kcalloc(SHA256_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		crypto_hmac_precompute(SAB_AUTH_HMAC_SHA2_256, &aalg->alg_key[0],
+					aalg->alg_key_len / 8, inner, outer);
+		params->AuthKey1_p = inner;
+		params->AuthKey2_p = outer;
+	} else if (strcmp(aalg->alg_name, "hmac(sha384)") == 0) {
+		params->AuthAlgo = SAB_AUTH_HMAC_SHA2_384;
+		inner = kcalloc(SHA384_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		outer = kcalloc(SHA384_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		crypto_hmac_precompute(SAB_AUTH_HMAC_SHA2_384, &aalg->alg_key[0],
+					aalg->alg_key_len / 8, inner, outer);
+		params->AuthKey1_p = inner;
+		params->AuthKey2_p = outer;
+	} else if (strcmp(aalg->alg_name, "hmac(sha512)") == 0) {
+		params->AuthAlgo = SAB_AUTH_HMAC_SHA2_512;
+		inner = kcalloc(SHA512_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		outer = kcalloc(SHA512_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		crypto_hmac_precompute(SAB_AUTH_HMAC_SHA2_512, &aalg->alg_key[0],
+					aalg->alg_key_len / 8, inner, outer);
+		params->AuthKey1_p = inner;
+		params->AuthKey2_p = outer;
+	} else if (strcmp(aalg->alg_name, "hmac(md5)") == 0) {
+		params->AuthAlgo = SAB_AUTH_HMAC_MD5;
+		inner = kcalloc(MD5_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		outer = kcalloc(MD5_DIGEST_SIZE, sizeof(uint8_t), GFP_KERNEL);
+		crypto_hmac_precompute(SAB_AUTH_HMAC_MD5, &aalg->alg_key[0],
+					aalg->alg_key_len / 8, inner, outer);
+		params->AuthKey1_p = inner;
+		params->AuthKey2_p = outer;
+	} else {
+		return false;
+	}
+
+	return true;
+}
+
+u32 *mtk_ddk_tr_ipsec_build(struct mtk_xfrm_params *xfrm_params, u32 ipsec_mode)
+{
+	struct xfrm_state *xs = xfrm_params->xs;
+	SABuilder_Params_IPsec_t ipsec_params;
+	SABuilder_Status_t sa_status;
+	SABuilder_Params_t params;
+	bool set_auth_success = false;
+	unsigned int SAWords = 0;
+	uint8_t *inner, *outer;
+
+	DMABuf_Status_t dma_status;
+	DMABuf_Properties_t dma_properties = {0, 0, 0, 0};
+	DMABuf_HostAddress_t sa_host_addr;
+
+	DMABuf_Handle_t sa_handle = {0};
+
+	sa_status = SABuilder_Init_ESP(&params,
+				       &ipsec_params,
+				       be32_to_cpu(xs->id.spi),
+				       ipsec_mode,
+				       SAB_IPSEC_IPV4,
+				       xfrm_params->dir);
+
+	if (sa_status != SAB_STATUS_OK) {
+		pr_err("SABuilder_Init_ESP failed\n");
+		sa_handle.p = NULL;
+		return (u32 *) sa_handle.p;
+	}
+
+	/* Add crypto key and parameters */
+	params.CryptoAlgo = set_crypto_algo(xs->ealg);
+	params.CryptoMode = SAB_CRYPTO_MODE_CBC;
+	params.KeyByteCount = xs->ealg->alg_key_len / 8;
+	params.Key_p = xs->ealg->alg_key;
+
+	/* Add authentication key and parameters */
+	set_auth_success = set_auth_algo(xs->aalg, &params, inner, outer);
+	if (set_auth_success != true) {
+		CRYPTO_ERR("Set Auth Algo failed\n");
+		sa_handle.p = NULL;
+		return (u32 *) sa_handle.p;
+	}
+
+	ipsec_params.IPsecFlags |= (SAB_IPSEC_PROCESS_IP_HEADERS
+				    | SAB_IPSEC_EXT_PROCESSING);
+	if (ipsec_mode == SAB_IPSEC_TUNNEL) {
+		ipsec_params.SrcIPAddr_p = (uint8_t *) &xs->props.saddr.a4;
+		ipsec_params.DestIPAddr_p = (uint8_t *) &xs->id.daddr.a4;
+	}
+
+	sa_status = SABuilder_GetSizes(&params, &SAWords, NULL, NULL);
+	if (sa_status != SAB_STATUS_OK) {
+		CRYPTO_ERR("SA not created because of size errors\n");
+		sa_handle.p = NULL;
+		return (u32 *) sa_handle.p;
+	}
+
+	dma_properties.fCached = true;
+	dma_properties.Alignment = MTK_EIP197_INLINE_DMA_ALIGNMENT_BYTE_COUNT;
+	dma_properties.Bank = MTK_EIP197_INLINE_BANK_TRANSFORM;
+	dma_properties.Size = SAWords * sizeof(u32);
+
+	dma_status = DMABuf_Alloc(dma_properties, &sa_host_addr, &sa_handle);
+	if (dma_status != DMABUF_STATUS_OK) {
+		CRYPTO_ERR("Allocation of SA failed\n");
+		/* NOTE(review): auth key buffers from set_auth_algo() are not freed on this error path -- verify */
+		sa_handle.p = NULL;
+		return (u32 *) sa_handle.p;
+	}
+
+	sa_status = SABuilder_BuildSA(&params, (u32 *) sa_host_addr.p, NULL, NULL);
+	if (sa_status != SAB_STATUS_OK) {
+		CRYPTO_ERR("SA not created because of errors\n");
+		sa_handle.p = NULL;
+		return (u32 *) sa_handle.p;
+	}
+
+	kfree(inner);
+	kfree(outer);
+	return (u32 *) sa_host_addr.p;
+}
+
+int mtk_ddk_pec_init(void)
+{
+	PEC_InitBlock_t pec_init_blk = {0, 0, false};
+	PEC_Capabilities_t pec_cap;
+	PEC_Status_t pec_sta;
+	u32 i = MTK_EIP197_INLINE_NOF_TRIES;
+
+	while (i) {
+		pec_sta = PEC_Init(PEC_INTERFACE_ID, &pec_init_blk);
+		if (pec_sta == PEC_STATUS_OK) {
+			CRYPTO_INFO("PEC_INIT ok!\n");
+			break;
+		} else if (pec_sta != PEC_STATUS_OK && pec_sta != PEC_STATUS_BUSY) {
+			return pec_sta;
+		}
+
+		mdelay(MTK_EIP197_INLINE_RETRY_DELAY_MS);
+		i--;
+	}
+
+	if (!i) {
+		CRYPTO_ERR("PEC could not be initialized: %d\n", pec_sta);
+		return pec_sta;
+	}
+
+	pec_sta = PEC_Capabilities_Get(&pec_cap);
+	if (pec_sta != PEC_STATUS_OK) {
+		CRYPTO_ERR("PEC capability could not be obtained: %d\n", pec_sta);
+		return pec_sta;
+	}
+
+	CRYPTO_INFO("PEC Capabilities: %s\n", pec_cap.szTextDescription);
+
+	return 0;
+}
+
+void mtk_ddk_pec_deinit(void)
+{
+}
diff --git a/package-21.02/kernel/crypto-eip/src/ddk.mk b/package-21.02/kernel/crypto-eip/src/ddk.mk
new file mode 100644
index 0000000..7c2a3c6
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/ddk.mk
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2023 MediaTek Inc.
+#
+# Author: Chris.Chou <chris.chou@mediatek.com>
+#         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+
+obj-$(CONFIG_RAMBUS_DDK) += ddk/
diff --git a/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/crypto-eip.h b/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/crypto-eip.h
new file mode 100644
index 0000000..24f6f91
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/crypto-eip.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ *
+ * Author: Chris.Chou <chris.chou@mediatek.com>
+ *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _CRYPTO_EIP_H_
+#define _CRYPTO_EIP_H_
+
+#include <crypto/sha.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <net/xfrm.h>
+
+#include "crypto-eip/crypto-eip197-inline-ddk.h"
+
+struct mtk_crypto;
+
+extern struct mtk_crypto mcrypto;
+
+#define TRANSFORM_RECORD_LEN		64
+
+#define MAX_TUNNEL_NUM			10
+#define PACKET_INBOUND			1
+#define PACKET_OUTBOUND			2
+
+#define HASH_CACHE_SIZE			SHA512_BLOCK_SIZE
+
+#define EIP197_FORCE_CLK_ON2		(0xfffd8)
+#define EIP197_FORCE_CLK_ON		(0xfffe8)
+#define EIP197_AUTO_LOOKUP_1		(0xfffffffc)
+#define EIP197_AUTO_LOOKUP_2		(0xffffffff)
+
+struct mtk_crypto {
+	struct mtk_eth *eth;
+	void __iomem *crypto_base;
+	void __iomem *eth_base;
+};
+
+struct mtk_xfrm_params {
+	struct xfrm_state *xs;
+	struct list_head node;
+	struct cdrt_entry *cdrt;
+
+	u32 *p_tr;			/* pointer to transform record */
+	u32 dir;			/* SABuilder_Direction_t */
+};
+
+void crypto_eth_write(u32 reg, u32 val);
+
+/* xfrm callback functions */
+int mtk_xfrm_offload_state_add(struct xfrm_state *xs);
+void mtk_xfrm_offload_state_delete(struct xfrm_state *xs);
+void mtk_xfrm_offload_state_free(struct xfrm_state *xs);
+void mtk_xfrm_offload_state_tear_down(void);
+int mtk_xfrm_offload_policy_add(struct xfrm_policy *xp);
+bool mtk_xfrm_offload_ok(struct sk_buff *skb, struct xfrm_state *xs);
+#endif /* _CRYPTO_EIP_H_ */
diff --git a/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/crypto-eip197-inline-ddk.h b/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/crypto-eip197-inline-ddk.h
new file mode 100644
index 0000000..00fb1a8
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/crypto-eip197-inline-ddk.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ *
+ * Author: Chris.Chou <chris.chou@mediatek.com>
+ *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _CRYPTO_EIP197_INLINE_DDK_H_
+#define _CRYPTO_EIP197_INLINE_DDK_H_
+
+#include <crypto-eip/ddk/basic_defs.h>
+#include <crypto-eip/ddk/configs/cs_ddk197.h>
+#include <crypto-eip/ddk/configs/cs_driver_ext.h>
+#include <crypto-eip/ddk/configs/cs_adapter.h>
+#include <crypto-eip/ddk/libc/clib.h>
+#include <crypto-eip/ddk/kit/builder/sa/sa_builder.h>
+#include <crypto-eip/ddk/kit/builder/sa/sa_builder_ipsec.h>
+#include <crypto-eip/ddk/kit/builder/sa/sa_builder_basic.h>
+#include <crypto-eip/ddk/kit/builder/sa/sa_builder_params_basic.h>
+#include <crypto-eip/ddk/kit/builder/token/token_builder.h>
+#include <crypto-eip/ddk/kit/iotoken/iotoken.h>
+#include <crypto-eip/ddk/kit/iotoken/iotoken_ext.h>
+#include <crypto-eip/ddk/slad/api_dmabuf.h>
+#include <crypto-eip/ddk/slad/api_pcl.h>
+#include <crypto-eip/ddk/slad/api_pcl_dtl.h>
+#include <crypto-eip/ddk/slad/api_pec.h>
+#include <crypto-eip/ddk/log/log.h>
+
+#ifdef DDK_EIP197_FW33_FEATURES
+#define MTK_EIP197_INLINE_STRIP_PADDING
+#endif
+
+#define CRYPTO_IOTOKEN_EXT
+
+#define MTK_EIP197_INLINE_BANK_PACKET			0
+
+#define MTK_EIP197_INLINE_BANK_TRANSFORM		1
+#define MTK_EIP197_INLINE_BANK_TOKEN			0
+
+/* Delay in milliseconds between tries to receive the packet */
+#define MTK_EIP197_INLINE_RETRY_DELAY_MS		2
+#define MTK_EIP197_PKT_GET_TIMEOUT_MS			2
+
+/* Maximum number of tries to receive the packet. */
+#define MTK_EIP197_INLINE_NOF_TRIES			10
+
+#define MTK_EIP197_INLINE_DMA_ALIGNMENT_BYTE_COUNT	16
+
+/* PEC Configuration */
+#ifdef DDK_PEC_IF_ID
+#define PEC_INTERFACE_ID				DDK_PEC_IF_ID
+#else
+#define PEC_INTERFACE_ID				0
+#endif
+#define PEC_REDIRECT_INTERFACE				7
+#define PEC_INLINE_INTERFACE				15
+
+#define PCL_INTERFACE_ID				0
+#endif /* _CRYPTO_EIP197_INLINE_DDK_H_ */
diff --git a/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/ddk-wrapper.h b/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/ddk-wrapper.h
new file mode 100644
index 0000000..db2b84f
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/ddk-wrapper.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ *
+ * Author: Chris.Chou <chris.chou@mediatek.com>
+ *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _CRYPTO_EIP_DDK_WRAPPER_H_
+#define _CRYPTO_EIP_DDK_WRAPPER_H_
+
+#include "crypto-eip.h"
+
+u32 *mtk_ddk_tr_ipsec_build(struct mtk_xfrm_params *xfrm_params, u32 ipsec_mod);
+
+int mtk_ddk_pec_init(void);
+void mtk_ddk_pec_deinit(void);
+#endif /* _CRYPTO_EIP_DDK_WRAPPER_H_ */
diff --git a/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/internal.h b/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/internal.h
new file mode 100644
index 0000000..e843841
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/inc/crypto-eip/internal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ *
+ * Author: Chris.Chou <chris.chou@mediatek.com>
+ *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _CRYPTO_EIP_INTERNAL_H_
+#define _CRYPTO_EIP_INTERNAL_H_
+
+#include <linux/device.h>
+
+/* Crypto platform device; set by init.c during module init and used
+ * both as the owner for devm allocations and as the logging device.
+ */
+extern struct device *crypto_dev;
+
+/* Logging helpers bound to crypto_dev. */
+#define CRYPTO_DBG(fmt, ...)		dev_dbg(crypto_dev, fmt, ##__VA_ARGS__)
+#define CRYPTO_INFO(fmt, ...)		dev_info(crypto_dev, fmt, ##__VA_ARGS__)
+#define CRYPTO_NOTICE(fmt, ...)		dev_notice(crypto_dev, fmt, ##__VA_ARGS__)
+#define CRYPTO_WARN(fmt, ...)		dev_warn(crypto_dev, fmt, ##__VA_ARGS__)
+#define CRYPTO_ERR(fmt, ...)		dev_err(crypto_dev, fmt, ##__VA_ARGS__)
+
+/* MMIO read-modify-write helpers (plain readl/writel; the
+ * read-modify-write is not atomic).
+ */
+#define setbits(addr, set)		writel(readl(addr) | (set), (addr))
+#define clrbits(addr, clr)		writel(readl(addr) & ~(clr), (addr))
+#define clrsetbits(addr, clr, set)	writel((readl(addr) & ~(clr)) | (set), (addr))
+#endif /* _CRYPTO_EIP_INTERNAL_H_ */
diff --git a/package-21.02/kernel/crypto-eip/src/init.c b/package-21.02/kernel/crypto-eip/src/init.c
new file mode 100644
index 0000000..cc53a39
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/init.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ *
+ * Author: Chris.Chou <chris.chou@mediatek.com>
+ *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <mtk_eth_soc.h>
+#include <mtk_hnat/hnat.h>
+
+#include <crypto-eip/ddk/configs/cs_hwpal_ext.h>
+
+#include "crypto-eip/crypto-eip.h"
+#include "crypto-eip/ddk-wrapper.h"
+#include "crypto-eip/internal.h"
+
+/* MODULE_AUTHOR string — fix: both email addresses were missing the
+ * closing '>'.
+ */
+#define DRIVER_AUTHOR	"Ren-Ting Wang <ren-ting.wang@mediatek.com>, " \
+			"Chris.Chou <chris.chou@mediatek.com>"
+
+struct mtk_crypto mcrypto;
+struct device *crypto_dev;
+
+/* Write an ethernet (FE) register via mcrypto.eth_base.
+ * NOTE(review): plain "inline" (not static) in a .c file, and unused
+ * within this file — presumably intended for other files of this
+ * module, but there is no prototype in internal.h. Either add a
+ * prototype or make this static inline like the crypto_eip_* helpers.
+ */
+inline void crypto_eth_write(u32 reg, u32 val)
+{
+	writel(val, mcrypto.eth_base + reg);
+}
+
+/* MMIO accessors for the EIP197 register space (mcrypto.crypto_base).
+ * set/clr/rmw build on the non-atomic helpers from internal.h.
+ */
+static inline void crypto_eip_write(u32 reg, u32 val)
+{
+	writel(val, mcrypto.crypto_base + reg);
+}
+
+static inline void crypto_eip_set(u32 reg, u32 mask)
+{
+	setbits(mcrypto.crypto_base + reg, mask);
+}
+
+static inline void crypto_eip_clr(u32 reg, u32 mask)
+{
+	clrbits(mcrypto.crypto_base + reg, mask);
+}
+
+static inline void crypto_eip_rmw(u32 reg, u32 mask, u32 val)
+{
+	clrsetbits(mcrypto.crypto_base + reg, mask, val);
+}
+
+static inline u32 crypto_eip_read(u32 reg)
+{
+	return readl(mcrypto.crypto_base + reg);
+}
+
+/* hnat hook: decide whether an skb's flow may be offloaded to the
+ * crypto engine. Currently claims every packet; the in-code TODO is
+ * to verify the packet is actually ESP before accepting it.
+ */
+static bool mtk_crypto_eip_offloadable(struct sk_buff *skb)
+{
+	/* TODO: check is esp */
+	return true;
+}
+
+/* xfrm device offload callbacks installed on every mtk_eth netdev. */
+static const struct xfrmdev_ops mtk_xfrmdev_ops = {
+	.xdo_dev_state_add = mtk_xfrm_offload_state_add,
+	.xdo_dev_state_delete = mtk_xfrm_offload_state_delete,
+	.xdo_dev_state_free = mtk_xfrm_offload_state_free,
+	.xdo_dev_offload_ok = mtk_xfrm_offload_ok,
+
+	/* .xdo_dev_policy_add does not exist in upstream v5.4; it is
+	 * provided by the xfrm packet-mode backport patch shipped with
+	 * this package.
+	 */
+	.xdo_dev_policy_add = mtk_xfrm_offload_policy_add,
+};
+
+/* Detach the xfrm offload ops from every mtk_eth netdev and withdraw
+ * the ESP hardware feature flags; undoes mtk_crypto_xfrm_offload_init().
+ */
+static void mtk_crypto_xfrm_offload_deinit(struct mtk_eth *eth)
+{
+	int i;
+
+	mtk_crypto_offloadable = NULL;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		/* fix: guard unused MAC slots — netdev[i] may be NULL */
+		if (!eth->netdev[i])
+			continue;
+
+		eth->netdev[i]->xfrmdev_ops = NULL;
+		eth->netdev[i]->features &= (~NETIF_F_HW_ESP);
+		eth->netdev[i]->hw_enc_features &= (~NETIF_F_HW_ESP);
+		/* netdev_change_features() must run under rtnl */
+		rtnl_lock();
+		netdev_change_features(eth->netdev[i]);
+		rtnl_unlock();
+	}
+}
+
+/* Advertise ESP hardware offload on every mtk_eth netdev, install our
+ * xfrmdev_ops, then register the hnat offloadable hook (last, so the
+ * hook never fires before the netdevs are ready).
+ */
+static void mtk_crypto_xfrm_offload_init(struct mtk_eth *eth)
+{
+	int i;
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		/* fix: guard unused MAC slots — netdev[i] may be NULL */
+		if (!eth->netdev[i])
+			continue;
+
+		eth->netdev[i]->xfrmdev_ops = &mtk_xfrmdev_ops;
+		eth->netdev[i]->features |= NETIF_F_HW_ESP;
+		eth->netdev[i]->hw_enc_features |= NETIF_F_HW_ESP;
+		/* netdev_change_features() must run under rtnl */
+		rtnl_lock();
+		netdev_change_features(eth->netdev[i]);
+		rtnl_unlock();
+	}
+
+	mtk_crypto_offloadable = mtk_crypto_eip_offloadable;
+}
+
+/* Resolve the "eth" phandle on the crypto node, map the ethernet
+ * register base and cache the mtk_eth driver data in mcrypto.
+ *
+ * Returns 0 on success, -ENODEV if the node/device is absent, -EFAULT
+ * if the ethernet driver is not bound yet, -ENXIO on a bad "reg",
+ * -ENOMEM on mapping failure.
+ *
+ * NOTE(review): of_find_device_by_node() takes a reference on
+ * eth_pdev that is never dropped — acceptable if the ethernet device
+ * must stay pinned for the module lifetime; otherwise add
+ * put_device(&eth_pdev->dev) once drvdata has been cached.
+ */
+static int __init mtk_crypto_eth_dts_init(struct platform_device *pdev)
+{
+	struct platform_device *eth_pdev;
+	struct device_node *crypto_node;
+	struct device_node *eth_node;
+	struct resource res;
+	int ret = 0;
+
+	crypto_node = pdev->dev.of_node;
+
+	eth_node = of_parse_phandle(crypto_node, "eth", 0);
+	if (!eth_node)
+		return -ENODEV;
+
+	eth_pdev = of_find_device_by_node(eth_node);
+	if (!eth_pdev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/* the mtk_eth driver must already be bound so drvdata is valid */
+	if (!eth_pdev->dev.driver) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (of_address_to_resource(eth_node, 0, &res)) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	/* mapping is owned by OUR pdev, so it lives as long as we do */
+	mcrypto.eth_base = devm_ioremap(&pdev->dev,
+					res.start, resource_size(&res));
+	if (!mcrypto.eth_base) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mcrypto.eth = platform_get_drvdata(eth_pdev);
+
+out:
+	of_node_put(eth_node);
+
+	return ret;
+}
+
+/* Locate the EIP197 node (compatible = HWPAL_PLATFORM_DEVICE_NAME from
+ * the DDK config header), map its register base, then chain into the
+ * ethernet DT lookup. On full success crypto_dev points at the crypto
+ * platform device; the reference taken by of_find_device_by_node() is
+ * deliberately kept alive for it.
+ *
+ * Return values mirror mtk_crypto_eth_dts_init().
+ */
+static int __init mtk_crypto_eip_dts_init(void)
+{
+	struct platform_device *crypto_pdev;
+	struct device_node *crypto_node;
+	struct resource res;
+	int ret;
+
+	crypto_node = of_find_compatible_node(NULL, NULL, HWPAL_PLATFORM_DEVICE_NAME);
+	if (!crypto_node)
+		return -ENODEV;
+
+	crypto_pdev = of_find_device_by_node(crypto_node);
+	if (!crypto_pdev) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/* check crypto platform device is ready */
+	if (!crypto_pdev->dev.driver) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (of_address_to_resource(crypto_node, 0, &res)) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	mcrypto.crypto_base = devm_ioremap(&crypto_pdev->dev,
+					   res.start, resource_size(&res));
+	if (!mcrypto.crypto_base) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = mtk_crypto_eth_dts_init(crypto_pdev);
+	if (ret)
+		goto out;
+
+	/* publish only after everything above succeeded */
+	crypto_dev = &crypto_pdev->dev;
+
+out:
+	of_node_put(crypto_node);
+
+	return ret;
+}
+
+/* Force the EIP197 clocks on and bring up the DDK packet engine.
+ *
+ * NOTE(review): the return value of mtk_ddk_pec_init() (declared int
+ * in ddk-wrapper.h) is ignored, so this function can never fail even
+ * when the PEC does not come up. Propagate it — after confirming the
+ * wrapper's success convention — so mtk_crypto_eip_init() can bail.
+ */
+static int __init mtk_crypto_eip_hw_init(void)
+{
+	crypto_eip_write(EIP197_FORCE_CLK_ON, 0xffffffff);
+
+	crypto_eip_write(EIP197_FORCE_CLK_ON2, 0xffffffff);
+
+	/* TODO: adjust AXI burst? */
+
+	mtk_ddk_pec_init();
+
+	return 0;
+}
+
+/* Reverse of mtk_crypto_eip_hw_init(): stop the packet engine first,
+ * then release the forced clocks.
+ */
+static void __exit mtk_crypto_eip_hw_deinit(void)
+{
+	mtk_ddk_pec_deinit();
+
+	crypto_eip_write(EIP197_FORCE_CLK_ON, 0);
+
+	crypto_eip_write(EIP197_FORCE_CLK_ON2, 0);
+}
+
+/* Module entry point. Order matters: DT lookup must populate
+ * crypto_dev and the register bases before HW init, and the hardware
+ * must be up before NETIF_F_HW_ESP is advertised to the stack.
+ */
+static int __init mtk_crypto_eip_init(void)
+{
+	int ret;
+
+	ret = mtk_crypto_eip_dts_init();
+	if (ret) {
+		CRYPTO_ERR("crypto-eip dts init failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = mtk_crypto_eip_hw_init();
+	if (ret) {
+		CRYPTO_ERR("crypto-eip hw init failed: %d\n", ret);
+		return ret;
+	}
+
+	mtk_crypto_xfrm_offload_init(mcrypto.eth);
+
+	CRYPTO_INFO("crypto-eip init done\n");
+
+	return ret;
+}
+
+/* Module exit: detach the xfrm ops, then power the engine down.
+ *
+ * NOTE(review): mtk_xfrm_offload_state_tear_down() (xfrm-offload.c)
+ * is never invoked, so SAs accepted earlier still reference state
+ * that dies with the module — this is the "deactivate all tunnel"
+ * TODO below.
+ */
+static void __exit mtk_crypto_eip_exit(void)
+{
+	/* TODO: deactivate all tunnel */
+
+	mtk_crypto_xfrm_offload_deinit(mcrypto.eth);
+
+	mtk_crypto_eip_hw_deinit();
+}
+
+module_init(mtk_crypto_eip_init);
+module_exit(mtk_crypto_eip_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MediaTek Crypto EIP Control Driver");
+MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/package-21.02/kernel/crypto-eip/src/xfrm-offload.c b/package-21.02/kernel/crypto-eip/src/xfrm-offload.c
new file mode 100644
index 0000000..bd79147
--- /dev/null
+++ b/package-21.02/kernel/crypto-eip/src/xfrm-offload.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 MediaTek Inc.
+ *
+ * Author: Chris.Chou <chris.chou@mediatek.com>
+ *         Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/bitops.h>
+
+#include <mtk_eth_soc.h>
+#include <mtk_hnat/hnat.h>
+#include <mtk_hnat/nf_hnat_mtk.h>
+
+#include <pce/cdrt.h>
+#include <pce/cls.h>
+#include <pce/netsys.h>
+
+#include <crypto-eip/ddk/configs/cs_hwpal_ext.h>
+#include <crypto-eip/ddk/kit/iotoken/iotoken.h>
+#include <crypto-eip/ddk/kit/iotoken/iotoken_ext.h>
+
+#include "crypto-eip/crypto-eip.h"
+#include "crypto-eip/ddk-wrapper.h"
+#include "crypto-eip/internal.h"
+
+static LIST_HEAD(xfrm_params_head);
+
+/* Zero this SA's CDRT descriptor and push the cleared entry to
+ * hardware, detaching the transform record from the engine.
+ */
+static void mtk_xfrm_offload_cdrt_tear_down(struct mtk_xfrm_params *xfrm_params)
+{
+	memset(&xfrm_params->cdrt->desc, 0, sizeof(struct cdrt_desc));
+
+	mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
+}
+
+/* Point this SA's CDRT descriptor at its transform record (physical
+ * address) and write the entry to hardware.
+ *
+ * NOTE(review): type=3, token_len=48, hw_srv=2 and the "| 2" pointer
+ * tag are bare magic numbers — give them named #defines referencing
+ * the EIP-197 CDRT field layout they encode.
+ */
+static int mtk_xfrm_offload_cdrt_setup(struct mtk_xfrm_params *xfrm_params)
+{
+	struct cdrt_desc *cdesc = &xfrm_params->cdrt->desc;
+
+	cdesc->desc1.common.type = 3;
+	cdesc->desc1.token_len = 48;
+	cdesc->desc1.p_tr[0] = __pa(xfrm_params->p_tr) | 2;
+
+	cdesc->desc2.hw_srv = 2;
+	cdesc->desc2.allow_pad = 1;
+	cdesc->desc2.strip_pad = 1;
+
+	return mtk_pce_cdrt_entry_write(xfrm_params->cdrt);
+}
+
+/* Register a PCE classifier rule steering ingress ESP packets with
+ * this SA's SPI (matched via l4_hdr_usr_data) to the crypto engine
+ * through the SA's CDRT index. l4_type 0x32 == 50 == IPPROTO_ESP.
+ *
+ * NOTE(review): the cls_entry is a stack temporary and its handle is
+ * not stored in xfrm_params, so the rule cannot be unregistered when
+ * the SA is freed — store it for the teardown path.
+ */
+static int mtk_xfrm_offload_cls_entry_setup(struct mtk_xfrm_params *xfrm_params)
+{
+	struct cls_entry esp_cls_entry = {
+		.entry = xfrm_params->cdrt->idx + 10, /* TODO: need update */
+		.cdesc = {
+			.fport = 0x3,
+			.tport_idx = 0x2,
+			.cdrt_idx = xfrm_params->cdrt->idx,
+
+			.l4_udp_hdr_nez_m = 0x1,
+			.l4_udp_hdr_nez = 0x1,
+			.l4_hdr_usr_data_m = 0xffffffff,
+			.l4_hdr_usr_data = be32_to_cpu(xfrm_params->xs->id.spi),
+			.l4_valid_m = 0x3,
+			.l4_valid = 0x3,
+			.l4_dport_m = 0x0,
+			.l4_dport = 0x0,
+			.tag_m = 0x3,
+			.tag = 0x1,
+			.dip_match_m = 0x0,
+			.dip_match = 0x0,
+			.l4_type_m = 0xff,
+			.l4_type = 0x32,
+		},
+	};
+
+	return mtk_pce_cls_entry_register(&esp_cls_entry);
+}
+
+/* Release the hardware-visible context of one SA: clear its CDRT
+ * entry first (so the engine stops referencing the record), then free
+ * the transform record buffer.
+ */
+static void mtk_xfrm_offload_context_tear_down(struct mtk_xfrm_params *xfrm_params)
+{
+	mtk_xfrm_offload_cdrt_tear_down(xfrm_params);
+
+	/* TODO: free context */
+	devm_kfree(crypto_dev, xfrm_params->p_tr);
+
+	/* TODO: transform record tear down */
+}
+
+/* Allocate and fill the transform record for this SA, then point its
+ * CDRT descriptor at it.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for an
+ * unsupported encap mode or a failed transform-record build.
+ */
+static int mtk_xfrm_offload_context_setup(struct mtk_xfrm_params *xfrm_params)
+{
+	u32 *tr;
+	int ret;
+
+	/* fix: devm_kcalloc() takes (n, size) — arguments were swapped.
+	 * Same byte count either way, but keep the API order.
+	 */
+	xfrm_params->p_tr = devm_kcalloc(crypto_dev, TRANSFORM_RECORD_LEN,
+					 sizeof(u32), GFP_KERNEL);
+	if (unlikely(!xfrm_params->p_tr))
+		return -ENOMEM;
+
+	switch (xfrm_params->xs->outer_mode.encap) {
+	case XFRM_MODE_TUNNEL:
+		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TUNNEL);
+		break;
+	case XFRM_MODE_TRANSPORT:
+		tr = mtk_ddk_tr_ipsec_build(xfrm_params, SAB_IPSEC_TRANSPORT);
+		break;
+	default:
+		/* fix: unsupported mode is -EINVAL, not -ENOMEM */
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	if (!tr) {
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	memcpy(xfrm_params->p_tr, tr, sizeof(u32) * TRANSFORM_RECORD_LEN);
+
+	/* TODO: free tr — the record built by the DDK is leaked here */
+
+	return mtk_xfrm_offload_cdrt_setup(xfrm_params);
+
+err_out:
+	devm_kfree(crypto_dev, xfrm_params->p_tr);
+
+	return ret;
+}
+
+/* Set up an outbound (encrypt) SA: allocate an encrypt-direction CDRT
+ * entry and build the transform context; the CDRT entry is released
+ * on failure. @xs is currently unused (kept for signature symmetry
+ * with the inbound variant).
+ */
+static int mtk_xfrm_offload_state_add_outbound(struct xfrm_state *xs,
+					       struct mtk_xfrm_params *xfrm_params)
+{
+	int ret;
+
+	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_ENCRYPT);
+	if (IS_ERR(xfrm_params->cdrt))
+		return PTR_ERR(xfrm_params->cdrt);
+
+	xfrm_params->dir = SAB_DIRECTION_OUTBOUND;
+
+	ret = mtk_xfrm_offload_context_setup(xfrm_params);
+	if (ret)
+		goto free_cdrt;
+
+	return ret;
+
+free_cdrt:
+	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);
+
+	return ret;
+}
+
+/* Set up an inbound (decrypt) SA: decrypt-direction CDRT entry,
+ * transform context, and a CLS rule steering ingress ESP with this
+ * SPI to the engine. Unwinds in reverse order on failure.
+ */
+static int mtk_xfrm_offload_state_add_inbound(struct xfrm_state *xs,
+					      struct mtk_xfrm_params *xfrm_params)
+{
+	int ret;
+
+	xfrm_params->cdrt = mtk_pce_cdrt_entry_alloc(CDRT_DECRYPT);
+	if (IS_ERR(xfrm_params->cdrt))
+		return PTR_ERR(xfrm_params->cdrt);
+
+	xfrm_params->dir = SAB_DIRECTION_INBOUND;
+
+	ret = mtk_xfrm_offload_context_setup(xfrm_params);
+	if (ret)
+		goto free_cdrt;
+
+	ret = mtk_xfrm_offload_cls_entry_setup(xfrm_params);
+	if (ret)
+		goto tear_down_context;
+
+	return ret;
+
+tear_down_context:
+	mtk_xfrm_offload_context_tear_down(xfrm_params);
+
+free_cdrt:
+	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);
+
+	return ret;
+}
+
+/* xdo_dev_state_add: validate the SA (IPv4 ESP, tunnel or transport
+ * mode only), build the per-SA context for the requested direction,
+ * then publish it via xso.offload_handle and track it on
+ * xfrm_params_head for module-exit teardown.
+ *
+ * Returns 0 on success or a negative errno; a non-zero return makes
+ * the xfrm core fall back to software processing for this SA.
+ */
+int mtk_xfrm_offload_state_add(struct xfrm_state *xs)
+{
+	struct mtk_xfrm_params *xfrm_params;
+	int ret = 0;
+
+	/* TODO: maybe support IPv6 in the future? */
+	if (xs->props.family != AF_INET) {
+		CRYPTO_NOTICE("Only IPv4 xfrm states may be offloaded\n");
+		return -EINVAL;
+	}
+
+	/* only support ESP right now */
+	if (xs->id.proto != IPPROTO_ESP) {
+		CRYPTO_NOTICE("Unsupported protocol 0x%04x\n", xs->id.proto);
+		return -EINVAL;
+	}
+
+	/* only support tunnel mode or transport mode */
+	if (!(xs->outer_mode.encap == XFRM_MODE_TUNNEL
+	    || xs->outer_mode.encap == XFRM_MODE_TRANSPORT))
+		return -EINVAL;
+
+	xfrm_params = devm_kzalloc(crypto_dev,
+				   sizeof(struct mtk_xfrm_params),
+				   GFP_KERNEL);
+	if (!xfrm_params)
+		return -ENOMEM;
+
+	xfrm_params->xs = xs;
+	INIT_LIST_HEAD(&xfrm_params->node);
+
+	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND)
+		/* rx path */
+		ret = mtk_xfrm_offload_state_add_inbound(xs, xfrm_params);
+	else
+		/* tx path */
+		ret = mtk_xfrm_offload_state_add_outbound(xs, xfrm_params);
+
+	if (ret) {
+		devm_kfree(crypto_dev, xfrm_params);
+		goto out;
+	}
+
+	/* handle set last, only for fully constructed state */
+	xs->xso.offload_handle = (unsigned long)xfrm_params;
+	list_add_tail(&xfrm_params->node, &xfrm_params_head);
+out:
+	return ret;
+}
+
+/* xdo_dev_state_delete: intentionally empty — all teardown is done in
+ * mtk_xfrm_offload_state_free(), which the xfrm core invokes later.
+ */
+void mtk_xfrm_offload_state_delete(struct xfrm_state *xs)
+{
+}
+
+/* xdo_dev_state_free: release everything state_add built for @xs.
+ * Safe to call when the state was never offloaded (handle == 0).
+ */
+void mtk_xfrm_offload_state_free(struct xfrm_state *xs)
+{
+	struct mtk_xfrm_params *xfrm_params;
+
+	if (!xs->xso.offload_handle)
+		return;
+
+	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;
+
+	/* fix: clear the handle so a second free attempt (e.g. module
+	 * exit tear-down overlapping the xfrm core) bails at the check
+	 * above instead of use-after-free
+	 */
+	xs->xso.offload_handle = 0;
+
+	list_del(&xfrm_params->node);
+
+	/* fix: the CDRT entry is freed exactly once below — the former
+	 * extra inbound-only mtk_pce_cdrt_entry_free() was a double
+	 * free. TODO: inbound SAs also registered a CLS rule in
+	 * mtk_xfrm_offload_cls_entry_setup(); unregister it here.
+	 */
+	mtk_xfrm_offload_context_tear_down(xfrm_params);
+
+	mtk_pce_cdrt_entry_free(xfrm_params->cdrt);
+
+	devm_kfree(crypto_dev, xfrm_params);
+}
+
+/* Free every SA this driver still tracks; _safe iteration because
+ * state_free() unlinks each node.
+ * NOTE(review): currently has no caller — it should be run from
+ * module exit before the hardware is powered down.
+ */
+void mtk_xfrm_offload_state_tear_down(void)
+{
+	struct mtk_xfrm_params *xfrm_params, *tmp;
+
+	list_for_each_entry_safe(xfrm_params, tmp, &xfrm_params_head, node)
+		mtk_xfrm_offload_state_free(xfrm_params->xs);
+}
+
+/* xdo_dev_policy_add (from the packet-mode backport): accept every
+ * policy — no per-policy hardware state is required yet.
+ */
+int mtk_xfrm_offload_policy_add(struct xfrm_policy *xp)
+{
+	return 0;
+}
+
+/* xdo_dev_offload_ok: tag the skb with this SA's CDRT index so hnat
+ * can bind the flow to the crypto engine; the false return declines
+ * the generic device-offload transmit path for this packet —
+ * presumably the CDRT tag carries the real offload. TODO confirm
+ * against the packet-mode backport semantics.
+ *
+ * NOTE(review): assumes xso.offload_handle is always set when the
+ * core calls this — true for states accepted in state_add, but a
+ * defensive NULL check would be cheap.
+ */
+bool mtk_xfrm_offload_ok(struct sk_buff *skb,
+			 struct xfrm_state *xs)
+{
+	struct mtk_xfrm_params *xfrm_params;
+
+	xfrm_params = (struct mtk_xfrm_params *)xs->xso.offload_handle;
+	skb_hnat_cdrt(skb) = xfrm_params->cdrt->idx;
+
+	return false;
+}
diff --git a/target/linux/mediatek/patches-5.4/999-1022-backport-linux-6.2-xfrm-packet-mode.patch b/target/linux/mediatek/patches-5.4/999-1022-backport-linux-6.2-xfrm-packet-mode.patch
new file mode 100644
index 0000000..9eb9347
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1022-backport-linux-6.2-xfrm-packet-mode.patch
@@ -0,0 +1,980 @@
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -919,6 +919,10 @@ struct xfrmdev_ops {
+ 	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
+ 				       struct xfrm_state *x);
+ 	void	(*xdo_dev_state_advance_esn) (struct xfrm_state *x);
++	void	(*xdo_dev_state_update_curlft) (struct xfrm_state *x);
++	int	(*xdo_dev_policy_add) (struct xfrm_policy *x);
++	void	(*xdo_dev_policy_delete) (struct xfrm_policy *x);
++	void	(*xdo_dev_policy_free) (struct xfrm_policy *x);
+ };
+ #endif
+ 
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -125,11 +125,25 @@ struct xfrm_state_walk {
+ 	struct xfrm_address_filter *filter;
+ };
+ 
++enum {
++	XFRM_DEV_OFFLOAD_IN = 1,
++	XFRM_DEV_OFFLOAD_OUT,
++	XFRM_DEV_OFFLOAD_FWD,
++};
++
++enum {
++	XFRM_DEV_OFFLOAD_UNSPECIFIED,
++	XFRM_DEV_OFFLOAD_CRYPTO,
++	XFRM_DEV_OFFLOAD_PACKET,
++};
++
+ struct xfrm_state_offload {
+ 	struct net_device	*dev;
+ 	unsigned long		offload_handle;
+ 	unsigned int		num_exthdrs;
+ 	u8			flags;
++	u8			dir : 2;
++	u8			type : 2;
+ };
+ 
+ struct xfrm_mode {
+@@ -527,6 +541,8 @@ struct xfrm_policy {
+ 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
+ 	struct hlist_node	bydst_inexact_list;
+ 	struct rcu_head		rcu;
++
++	struct xfrm_state_offload xdo;
+ };
+ 
+ static inline struct net *xp_net(const struct xfrm_policy *xp)
+@@ -1084,6 +1100,29 @@ xfrm_state_addr_cmp(const struct xfrm_tm
+ }
+ 
+ #ifdef CONFIG_XFRM
++static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
++{
++	struct sec_path *sp = skb_sec_path(skb);
++
++	return sp->xvec[sp->len - 1];
++}
++#endif
++
++static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
++{
++#ifdef CONFIG_XFRM
++	struct sec_path *sp = skb_sec_path(skb);
++
++	if (!sp || !sp->olen || sp->len != sp->olen)
++		return NULL;
++
++	return &sp->ovec[sp->olen - 1];
++#else
++	return NULL;
++#endif
++}
++
++#ifdef CONFIG_XFRM
+ int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
+ 			unsigned short family);
+ 
+@@ -1093,10 +1132,19 @@ static inline int __xfrm_policy_check2(s
+ {
+ 	struct net *net = dev_net(skb->dev);
+ 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
++	struct xfrm_offload *xo = xfrm_offload(skb);
++	struct xfrm_state *x;
+ 
+ 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
+ 		return __xfrm_policy_check(sk, ndir, skb, family);
+ 
++	if (xo) {
++		x = xfrm_input_state(skb);
++		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
++			return (xo->flags & CRYPTO_DONE) &&
++			       (xo->status & CRYPTO_SUCCESS);
++	}
++
+ 	return	(!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
+ 		(skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
+ 		__xfrm_policy_check(sk, ndir, skb, family);
+@@ -1490,6 +1538,23 @@ struct xfrm_state *xfrm_stateonly_find(s
+ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
+ 					      unsigned short family);
+ int xfrm_state_check_expire(struct xfrm_state *x);
++#ifdef CONFIG_XFRM_OFFLOAD
++static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
++{
++	struct xfrm_state_offload *xdo = &x->xso;
++	struct net_device *dev = xdo->dev;
++
++	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
++		return;
++
++	if (dev && dev->xfrmdev_ops &&
++	    dev->xfrmdev_ops->xdo_dev_state_update_curlft)
++		dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
++
++}
++#else
++static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
++#endif
+ void xfrm_state_insert(struct xfrm_state *x);
+ int xfrm_state_add(struct xfrm_state *x);
+ int xfrm_state_update(struct xfrm_state *x);
+@@ -1539,6 +1604,8 @@ struct xfrm_state *xfrm_find_acq_byseq(s
+ int xfrm_state_delete(struct xfrm_state *x);
+ int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
+ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
++int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
++			  bool task_valid);
+ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+ u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
+@@ -1820,29 +1887,6 @@ static inline void xfrm_states_delete(st
+ }
+ #endif
+ 
+-#ifdef CONFIG_XFRM
+-static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
+-{
+-	struct sec_path *sp = skb_sec_path(skb);
+-
+-	return sp->xvec[sp->len - 1];
+-}
+-#endif
+-
+-static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
+-{
+-#ifdef CONFIG_XFRM
+-	struct sec_path *sp = skb_sec_path(skb);
+-
+-	if (!sp || !sp->olen || sp->len != sp->olen)
+-		return NULL;
+-
+-	return &sp->ovec[sp->olen - 1];
+-#else
+-	return NULL;
+-#endif
+-}
+-
+ void __init xfrm_dev_init(void);
+ 
+ #ifdef CONFIG_XFRM_OFFLOAD
+@@ -1851,6 +1895,9 @@ void xfrm_dev_backlog(struct softnet_dat
+ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
+ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
+ 		       struct xfrm_user_offload *xuo);
++int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
++			struct xfrm_user_offload *xuo, u8 dir,
++			struct netlink_ext_ack *extack);
+ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+ 
+ static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+@@ -1899,6 +1946,27 @@ static inline void xfrm_dev_state_free(s
+ 		dev_put(dev);
+ 	}
+ }
++
++static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
++{
++	struct xfrm_state_offload *xdo = &x->xdo;
++	struct net_device *dev = xdo->dev;
++
++	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
++		dev->xfrmdev_ops->xdo_dev_policy_delete(x);
++}
++
++static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
++{
++	struct xfrm_state_offload *xdo = &x->xdo;
++	struct net_device *dev = xdo->dev;
++
++	if (dev && dev->xfrmdev_ops) {
++		if (dev->xfrmdev_ops->xdo_dev_policy_free)
++			dev->xfrmdev_ops->xdo_dev_policy_free(x);
++		xdo->dev = NULL;
++	}
++}
+ #else
+ static inline void xfrm_dev_resume(struct sk_buff *skb)
+ {
+@@ -1931,6 +1999,21 @@ static inline bool xfrm_dev_offload_ok(s
+ 	return false;
+ }
+ 
++static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
++				      struct xfrm_user_offload *xuo, u8 dir,
++				      struct netlink_ext_ack *extack)
++{
++	return 0;
++}
++
++static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
++{
++}
++
++static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
++{
++}
++
+ static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
+ {
+ }
+--- a/include/uapi/linux/xfrm.h
++++ b/include/uapi/linux/xfrm.h
+@@ -512,6 +512,12 @@ struct xfrm_user_offload {
+  */
+ #define XFRM_OFFLOAD_IPV6	1
+ #define XFRM_OFFLOAD_INBOUND	2
++/* Two bits above are relevant for state path only, while
++ * offload is used for both policy and state flows.
++ *
++ * In policy offload mode, they are free and can be safely reused.
++ */
++#define XFRM_OFFLOAD_PACKET	4
+ 
+ #ifndef __KERNEL__
+ /* backwards compatibility for userspace */
+--- a/net/xfrm/xfrm_device.c
++++ b/net/xfrm/xfrm_device.c
+@@ -80,6 +80,7 @@ struct sk_buff *validate_xmit_xfrm(struc
+ 	struct softnet_data *sd;
+ 	netdev_features_t esp_features = features;
+ 	struct xfrm_offload *xo = xfrm_offload(skb);
++	struct net_device *dev = skb->dev;
+ 	struct sec_path *sp;
+ 
+ 	if (!xo || (xo->flags & XFRM_XMIT))
+@@ -93,6 +94,17 @@ struct sk_buff *validate_xmit_xfrm(struc
+ 	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
+ 		return skb;
+ 
++	/* The packet was sent to HW IPsec packet offload engine,
++	 * but to wrong device. Drop the packet, so it won't skip
++	 * XFRM stack.
++	 */
++	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
++		kfree_skb(skb);
++		//dev_core_stats_tx_dropped_inc(dev);
++		atomic_long_inc(&dev->tx_dropped);
++		return NULL;
++	}
++
+ 	local_irq_save(flags);
+ 	sd = this_cpu_ptr(&softnet_data);
+ 	err = !skb_queue_empty(&sd->xfrm_backlog);
+@@ -198,6 +210,7 @@ int xfrm_dev_state_add(struct net *net,
+ 	struct xfrm_state_offload *xso = &x->xso;
+ 	xfrm_address_t *saddr;
+ 	xfrm_address_t *daddr;
++	bool is_packet_offload;
+ 
+ 	if (!x->type_offload)
+ 		return -EINVAL;
+@@ -206,9 +219,11 @@ int xfrm_dev_state_add(struct net *net,
+ 	if (x->encap || x->tfcpad)
+ 		return -EINVAL;
+ 
+-	if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND))
++	if (xuo->flags &
++	    ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET))
+ 		return -EINVAL;
+ 
++	is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
+ 	dev = dev_get_by_index(net, xuo->ifindex);
+ 	if (!dev) {
+ 		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
+@@ -223,7 +238,7 @@ int xfrm_dev_state_add(struct net *net,
+ 					x->props.family,
+ 					xfrm_smark_get(0, x));
+ 		if (IS_ERR(dst))
+-			return 0;
++			return (is_packet_offload) ? -EINVAL : 0;
+ 
+ 		dev = dst->dev;
+ 
+@@ -234,7 +249,7 @@ int xfrm_dev_state_add(struct net *net,
+ 	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
+ 		xso->dev = NULL;
+ 		dev_put(dev);
+-		return 0;
++		return (is_packet_offload) ? -EINVAL : 0;
+ 	}
+ 
+ 	if (x->props.flags & XFRM_STATE_ESN &&
+@@ -249,14 +264,28 @@ int xfrm_dev_state_add(struct net *net,
+ 	/* Don't forward bit that is not implemented */
+ 	xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6;
+ 
++	if (is_packet_offload)
++		xso->type = XFRM_DEV_OFFLOAD_PACKET;
++	else
++		xso->type = XFRM_DEV_OFFLOAD_CRYPTO;
++
+ 	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
+ 	if (err) {
+ 		xso->num_exthdrs = 0;
+ 		xso->flags = 0;
+ 		xso->dev = NULL;
+ 		dev_put(dev);
++		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+ 
+-		if (err != -EOPNOTSUPP)
++		/* User explicitly requested packet offload mode and configured
++		 * policy in addition to the XFRM state. So be civil to users,
++		 * and return an error instead of taking fallback path.
++		 *
++		 * This WARN_ON() can be seen as a documentation for driver
++		 * authors to do not return -EOPNOTSUPP in packet offload mode.
++		 */
++		WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
++		if (err != -EOPNOTSUPP || is_packet_offload)
+ 			return err;
+ 	}
+ 
+@@ -264,6 +293,65 @@ int xfrm_dev_state_add(struct net *net,
+ }
+ EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
+ 
++int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
++			struct xfrm_user_offload *xuo, u8 dir,
++			struct netlink_ext_ack *extack)
++{
++	struct xfrm_state_offload *xdo = &xp->xdo;
++	struct net_device *dev;
++	int err;
++
++	if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
++		/* We support only packet offload mode and it means
++		 * that user must set XFRM_OFFLOAD_PACKET bit.
++		 */
++		NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
++		return -EINVAL;
++	}
++
++	dev = dev_get_by_index(net, xuo->ifindex);
++	if (!dev)
++		return -EINVAL;
++
++	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
++		xdo->dev = NULL;
++		dev_put(dev);
++		NL_SET_ERR_MSG(extack, "Policy offload is not supported");
++		return -EINVAL;
++	}
++
++	xdo->dev = dev;
++	xdo->type = XFRM_DEV_OFFLOAD_PACKET;
++	switch (dir) {
++	case XFRM_POLICY_IN:
++		xdo->dir = XFRM_DEV_OFFLOAD_IN;
++		break;
++	case XFRM_POLICY_OUT:
++		xdo->dir = XFRM_DEV_OFFLOAD_OUT;
++		break;
++	case XFRM_POLICY_FWD:
++		xdo->dir = XFRM_DEV_OFFLOAD_FWD;
++		break;
++	default:
++		xdo->dev = NULL;
++		dev_put(dev);
++		NL_SET_ERR_MSG(extack, "Unrecognized oflload direction");
++		return -EINVAL;
++	}
++
++	err = dev->xfrmdev_ops->xdo_dev_policy_add(xp);
++	if (err) {
++		xdo->dev = NULL;
++		xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
++		xdo->dir = 0;
++		NL_SET_ERR_MSG(extack, "Device failed to offload this policy");
++		return err;
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);
++
+ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+ {
+ 	int mtu;
+@@ -274,8 +362,9 @@ bool xfrm_dev_offload_ok(struct sk_buff
+ 	if (!x->type_offload || x->encap)
+ 		return false;
+ 
+-	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
+-	    (!xdst->child->xfrm)) {
++	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
++	    ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
++	     !xdst->child->xfrm)) {
+ 		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
+ 		if (skb->len <= mtu)
+ 			goto ok;
+@@ -376,8 +465,10 @@ static int xfrm_dev_feat_change(struct n
+ 
+ static int xfrm_dev_down(struct net_device *dev)
+ {
+-	if (dev->features & NETIF_F_HW_ESP)
++	if (dev->features & NETIF_F_HW_ESP) {
+ 		xfrm_dev_state_flush(dev_net(dev), dev, true);
++		xfrm_dev_policy_flush(dev_net(dev), dev, true);
++	}
+ 
+ 	return NOTIFY_DONE;
+ }
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -410,7 +410,7 @@ static int xfrm_output_one(struct sk_buf
+ 	struct xfrm_state *x = dst->xfrm;
+ 	struct net *net = xs_net(x);
+ 
+-	if (err <= 0)
++	if (err <= 0 || x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+ 		goto resume;
+ 
+ 	do {
+@@ -568,6 +568,16 @@ int xfrm_output(struct sock *sk, struct
+ 	struct xfrm_state *x = skb_dst(skb)->xfrm;
+ 	int err;
+ 
++	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
++		if (!xfrm_dev_offload_ok(skb, x)) {
++			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
++			kfree_skb(skb);
++			return -EHOSTUNREACH;
++		}
++
++		return xfrm_output_resume(skb, 0);
++	}
++
+ 	secpath_reset(skb);
+ 
+ 	if (xfrm_dev_offload_ok(skb, x)) {
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -423,6 +423,7 @@ void xfrm_policy_destroy(struct xfrm_pol
+ 	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
+ 		BUG();
+ 
++	xfrm_dev_policy_free(policy);
+ 	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
+ }
+ EXPORT_SYMBOL(xfrm_policy_destroy);
+@@ -533,7 +534,7 @@ redo:
+ 		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
+ 		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
+ 				pol->family, nhashmask, dbits, sbits);
+-		if (!entry0) {
++		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
+ 			hlist_del_rcu(&pol->bydst);
+ 			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
+ 			h0 = h;
+@@ -864,7 +865,7 @@ static void xfrm_policy_inexact_list_rei
+ 				break;
+ 		}
+ 
+-		if (newpos)
++		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
+ 			hlist_add_behind_rcu(&policy->bydst, newpos);
+ 		else
+ 			hlist_add_head_rcu(&policy->bydst, &n->hhead);
+@@ -1345,7 +1346,7 @@ static void xfrm_hash_rebuild(struct wor
+ 			else
+ 				break;
+ 		}
+-		if (newpos)
++		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
+ 			hlist_add_behind_rcu(&policy->bydst, newpos);
+ 		else
+ 			hlist_add_head_rcu(&policy->bydst, chain);
+@@ -1522,7 +1523,7 @@ static void xfrm_policy_insert_inexact_l
+ 			break;
+ 	}
+ 
+-	if (newpos)
++	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
+ 		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
+ 	else
+ 		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
+@@ -1559,9 +1560,12 @@ static struct xfrm_policy *xfrm_policy_i
+ 			break;
+ 	}
+ 
+-	if (newpos)
++	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
+ 		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
+ 	else
++		/* Packet offload policies enter to the head
++		 * to speed-up lookups.
++		 */
+ 		hlist_add_head_rcu(&policy->bydst, chain);
+ 
+ 	return delpol;
+@@ -1767,12 +1771,41 @@ xfrm_policy_flush_secctx_check(struct ne
+ 	}
+ 	return err;
+ }
++
++static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
++						     struct net_device *dev,
++						     bool task_valid)
++{
++	struct xfrm_policy *pol;
++	int err = 0;
++
++	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
++		if (pol->walk.dead ||
++		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
++		    pol->xdo.dev != dev)
++			continue;
++
++		err = security_xfrm_policy_delete(pol->security);
++		if (err) {
++			xfrm_audit_policy_delete(pol, 0, task_valid);
++			return err;
++		}
++	}
++	return err;
++}
+ #else
+ static inline int
+ xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
+ {
+ 	return 0;
+ }
++
++static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
++						     struct net_device *dev,
++						     bool task_valid)
++{
++	return 0;
++}
+ #endif
+ 
+ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
+@@ -1812,6 +1845,44 @@ out:
+ }
+ EXPORT_SYMBOL(xfrm_policy_flush);
+ 
++int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
++			  bool task_valid)
++{
++	int dir, err = 0, cnt = 0;
++	struct xfrm_policy *pol;
++
++	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
++
++	err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
++	if (err)
++		goto out;
++
++again:
++	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
++		dir = xfrm_policy_id2dir(pol->index);
++		if (pol->walk.dead ||
++		    dir >= XFRM_POLICY_MAX ||
++		    pol->xdo.dev != dev)
++			continue;
++
++		__xfrm_policy_unlink(pol, dir);
++		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
++		cnt++;
++		xfrm_audit_policy_delete(pol, 1, task_valid);
++		xfrm_policy_kill(pol);
++		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
++		goto again;
++	}
++	if (cnt)
++		__xfrm_policy_inexact_flush(net);
++	else
++		err = -ESRCH;
++out:
++	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
++	return err;
++}
++EXPORT_SYMBOL(xfrm_dev_policy_flush);
++
+ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
+ 		     int (*func)(struct xfrm_policy *, int, int, void*),
+ 		     void *data)
+@@ -2113,6 +2184,9 @@ static struct xfrm_policy *xfrm_policy_l
+ 			break;
+ 		}
+ 	}
++	if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
++		goto skip_inexact;
++
+ 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
+ 	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
+ 							 daddr))
+@@ -2246,6 +2320,7 @@ int xfrm_policy_delete(struct xfrm_polic
+ 	pol = __xfrm_policy_unlink(pol, dir);
+ 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ 	if (pol) {
++		xfrm_dev_policy_delete(pol);
+ 		xfrm_policy_kill(pol);
+ 		return 0;
+ 	}
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -78,6 +78,25 @@ xfrm_spi_hash(struct net *net, const xfr
+ 	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
+ }
+ 
++#define XFRM_STATE_INSERT(by, _n, _h, _type)                               \
++	{                                                                  \
++		struct xfrm_state *_x = NULL;                              \
++									   \
++		if (_type != XFRM_DEV_OFFLOAD_PACKET) {                    \
++			hlist_for_each_entry_rcu(_x, _h, by) {             \
++				if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
++					continue;                          \
++				break;                                     \
++			}                                                  \
++		}                                                          \
++									   \
++		if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET)        \
++			/* SAD is empty or consist from HW SAs only */     \
++			hlist_add_head_rcu(_n, _h);                        \
++		else                                                       \
++			hlist_add_before_rcu(_n, &_x->by);                 \
++	}
++
+ static void xfrm_hash_transfer(struct hlist_head *list,
+ 			       struct hlist_head *ndsttable,
+ 			       struct hlist_head *nsrctable,
+@@ -93,18 +112,19 @@ static void xfrm_hash_transfer(struct hl
+ 		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
+ 				    x->props.reqid, x->props.family,
+ 				    nhashmask);
+-		hlist_add_head_rcu(&x->bydst, ndsttable + h);
++		XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);
+ 
+ 		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
+ 				    x->props.family,
+ 				    nhashmask);
+-		hlist_add_head_rcu(&x->bysrc, nsrctable + h);
++		XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);
+ 
+ 		if (x->id.spi) {
+ 			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
+ 					    x->id.proto, x->props.family,
+ 					    nhashmask);
+-			hlist_add_head_rcu(&x->byspi, nspitable + h);
++			XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
++					  x->xso.type);
+ 		}
+ 	}
+ }
+@@ -527,6 +548,8 @@ static enum hrtimer_restart xfrm_timer_h
+ 	int err = 0;
+ 
+ 	spin_lock(&x->lock);
++	xfrm_dev_state_update_curlft(x);
++
+ 	if (x->km.state == XFRM_STATE_DEAD)
+ 		goto out;
+ 	if (x->km.state == XFRM_STATE_EXPIRED)
+@@ -923,6 +946,49 @@ xfrm_init_tempstate(struct xfrm_state *x
+ 	x->props.family = tmpl->encap_family;
+ }
+ 
++static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
++						  const xfrm_address_t *daddr,
++						  __be32 spi, u8 proto,
++						  unsigned short family,
++						  struct xfrm_state_offload *xdo)
++{
++	unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
++	struct xfrm_state *x;
++
++	hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
++#ifdef CONFIG_XFRM_OFFLOAD
++		if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
++			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
++				/* HW states are in the head of list, there is
++				 * no need to iterate further.
++				 */
++				break;
++
++			/* Packet offload: both policy and SA should
++			 * have same device.
++			 */
++			if (xdo->dev != x->xso.dev)
++				continue;
++		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
++			/* Skip HW policy for SW lookups */
++			continue;
++#endif
++		if (x->props.family != family ||
++		    x->id.spi       != spi ||
++		    x->id.proto     != proto ||
++		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
++			continue;
++
++		if ((mark & x->mark.m) != x->mark.v)
++			continue;
++		if (!xfrm_state_hold_rcu(x))
++			continue;
++		return x;
++	}
++
++	return NULL;
++}
++
+ static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
+ 					      const xfrm_address_t *daddr,
+ 					      __be32 spi, u8 proto,
+@@ -1062,6 +1128,23 @@ xfrm_state_find(const xfrm_address_t *da
+ 	rcu_read_lock();
+ 	h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
+ 	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
++#ifdef CONFIG_XFRM_OFFLOAD
++		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
++			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
++				/* HW states are in the head of list, there is
++				 * no need to iterate further.
++				 */
++				break;
++
++			/* Packet offload: both policy and SA should
++			 * have same device.
++			 */
++			if (pol->xdo.dev != x->xso.dev)
++				continue;
++		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
++			/* Skip HW policy for SW lookups */
++			continue;
++#endif
+ 		if (x->props.family == encap_family &&
+ 		    x->props.reqid == tmpl->reqid &&
+ 		    (mark & x->mark.m) == x->mark.v &&
+@@ -1079,6 +1162,23 @@ xfrm_state_find(const xfrm_address_t *da
+ 
+ 	h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
+ 	hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
++#ifdef CONFIG_XFRM_OFFLOAD
++		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
++			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
++				/* HW states are in the head of list, there is
++				 * no need to iterate further.
++				 */
++				break;
++
++			/* Packet offload: both policy and SA should
++			 * have same device.
++			 */
++			if (pol->xdo.dev != x->xso.dev)
++				continue;
++		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
++			/* Skip HW policy for SW lookups */
++			continue;
++#endif
+ 		if (x->props.family == encap_family &&
+ 		    x->props.reqid == tmpl->reqid &&
+ 		    (mark & x->mark.m) == x->mark.v &&
+@@ -1096,8 +1196,10 @@ found:
+ 	x = best;
+ 	if (!x && !error && !acquire_in_progress) {
+ 		if (tmpl->id.spi &&
+-		    (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
+-					      tmpl->id.proto, encap_family)) != NULL) {
++		    (x0 = __xfrm_state_lookup_all(net, mark, daddr,
++						  tmpl->id.spi, tmpl->id.proto,
++						  encap_family,
++						  &pol->xdo)) != NULL) {
+ 			to_put = x0;
+ 			error = -EEXIST;
+ 			goto out;
+@@ -1131,17 +1233,42 @@ found:
+ 			x = NULL;
+ 			goto out;
+ 		}
+-
++#ifdef CONFIG_XFRM_OFFLOAD
++		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
++			struct xfrm_state_offload *xdo = &pol->xdo;
++			struct xfrm_state_offload *xso = &x->xso;
++
++			xso->type = XFRM_DEV_OFFLOAD_PACKET;
++			xso->dir = xdo->dir;
++			xso->dev = xdo->dev;
++			error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x);
++			if (error) {
++				xso->dir = 0;
++				xso->dev = NULL;
++				xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
++				x->km.state = XFRM_STATE_DEAD;
++				to_put = x;
++				x = NULL;
++				goto out;
++			}
++		}
++#endif
+ 		if (km_query(x, tmpl, pol) == 0) {
+ 			spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ 			x->km.state = XFRM_STATE_ACQ;
+ 			list_add(&x->km.all, &net->xfrm.state_all);
+-			hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
++			XFRM_STATE_INSERT(bydst, &x->bydst,
++					  net->xfrm.state_bydst + h,
++					  x->xso.type);
+ 			h = xfrm_src_hash(net, daddr, saddr, encap_family);
+-			hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
++			XFRM_STATE_INSERT(bysrc, &x->bysrc,
++					  net->xfrm.state_bysrc + h,
++					  x->xso.type);
+ 			if (x->id.spi) {
+ 				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
+-				hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
++				XFRM_STATE_INSERT(byspi, &x->byspi,
++						  net->xfrm.state_byspi + h,
++						  x->xso.type);
+ 			}
+ 			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
+ 			hrtimer_start(&x->mtimer,
+@@ -1151,6 +1278,16 @@ found:
+ 			xfrm_hash_grow_check(net, x->bydst.next != NULL);
+ 			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 		} else {
++#ifdef CONFIG_XFRM_OFFLOAD
++			struct xfrm_state_offload *xso = &x->xso;
++
++			if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
++				xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
++				xso->dir = 0;
++				xso->dev = NULL;
++				xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
++			}
++#endif
+ 			x->km.state = XFRM_STATE_DEAD;
+ 			to_put = x;
+ 			x = NULL;
+@@ -1246,16 +1383,19 @@ static void __xfrm_state_insert(struct x
+ 
+ 	h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
+ 			  x->props.reqid, x->props.family);
+-	hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
++	XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
++			  x->xso.type);
+ 
+ 	h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
+-	hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
++	XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
++			  x->xso.type);
+ 
+ 	if (x->id.spi) {
+ 		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
+ 				  x->props.family);
+ 
+-		hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
++		XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
++				  x->xso.type);
+ 	}
+ 
+ 	hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
+@@ -1369,9 +1509,11 @@ static struct xfrm_state *__find_acq_cor
+ 			      ktime_set(net->xfrm.sysctl_acq_expires, 0),
+ 			      HRTIMER_MODE_REL_SOFT);
+ 		list_add(&x->km.all, &net->xfrm.state_all);
+-		hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
++		XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
++				  x->xso.type);
+ 		h = xfrm_src_hash(net, daddr, saddr, family);
+-		hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
++		XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
++				  x->xso.type);
+ 
+ 		net->xfrm.state_num++;
+ 
+@@ -1742,6 +1884,8 @@ EXPORT_SYMBOL(xfrm_state_update);
+ 
+ int xfrm_state_check_expire(struct xfrm_state *x)
+ {
++	xfrm_dev_state_update_curlft(x);
++
+ 	if (!x->curlft.use_time)
+ 		x->curlft.use_time = ktime_get_real_seconds();
+ 
+@@ -2043,7 +2187,8 @@ int xfrm_alloc_spi(struct xfrm_state *x,
+ 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
+ 		x->id.spi = newspi;
+ 		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
+-		hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
++		XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
++				  x->xso.type);
+ 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+ 
+ 		err = 0;
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -844,6 +844,8 @@ static int copy_user_offload(struct xfrm
+ 	memset(xuo, 0, sizeof(*xuo));
+ 	xuo->ifindex = xso->dev->ifindex;
+ 	xuo->flags = xso->flags;
++	if (xso->type == XFRM_DEV_OFFLOAD_PACKET)
++		xuo->flags |= XFRM_OFFLOAD_PACKET;
+ 
+ 	return 0;
+ }
+@@ -1634,6 +1636,15 @@ static struct xfrm_policy *xfrm_policy_c
+ 	if (attrs[XFRMA_IF_ID])
+ 		xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+ 
++	/* configure the hardware if offload is requested */
++	if (attrs[XFRMA_OFFLOAD_DEV]) {
++		err = xfrm_dev_policy_add(net, xp,
++					  nla_data(attrs[XFRMA_OFFLOAD_DEV]),
++					  p->dir, 0);
++		if (err)
++			goto error;
++	}
++
+ 	return xp;
+  error:
+ 	*errp = err;
+@@ -1672,6 +1683,7 @@ static int xfrm_add_policy(struct sk_buf
+ 	xfrm_audit_policy_add(xp, err ? 0 : 1, true);
+ 
+ 	if (err) {
++		xfrm_dev_policy_delete(xp);
+ 		security_xfrm_policy_free(xp->security);
+ 		kfree(xp);
+ 		return err;
+@@ -1783,6 +1795,8 @@ static int dump_one_policy(struct xfrm_p
+ 		err = xfrm_mark_put(skb, &xp->mark);
+ 	if (!err)
+ 		err = xfrm_if_id_put(skb, xp->if_id);
++	if (!err && xp->xdo.dev)
++		err = copy_user_offload(&xp->xdo, skb);
+ 	if (err) {
+ 		nlmsg_cancel(skb, nlh);
+ 		return err;
+@@ -2958,6 +2972,8 @@ static int build_acquire(struct sk_buff
+ 		err = xfrm_mark_put(skb, &xp->mark);
+ 	if (!err)
+ 		err = xfrm_if_id_put(skb, xp->if_id);
++	if (!err && xp->xdo.dev)
++		err = copy_user_offload(&xp->xdo, skb);
+ 	if (err) {
+ 		nlmsg_cancel(skb, nlh);
+ 		return err;
+@@ -3076,6 +3092,8 @@ static int build_polexpire(struct sk_buf
+ 		err = xfrm_mark_put(skb, &xp->mark);
+ 	if (!err)
+ 		err = xfrm_if_id_put(skb, xp->if_id);
++	if (!err && xp->xdo.dev)
++		err = copy_user_offload(&xp->xdo, skb);
+ 	if (err) {
+ 		nlmsg_cancel(skb, nlh);
+ 		return err;
+@@ -3159,6 +3177,8 @@ static int xfrm_notify_policy(struct xfr
+ 		err = xfrm_mark_put(skb, &xp->mark);
+ 	if (!err)
+ 		err = xfrm_if_id_put(skb, xp->if_id);
++	if (!err && xp->xdo.dev)
++		err = copy_user_offload(&xp->xdo, skb);
+ 	if (err)
+ 		goto out_free_skb;
+ 
diff --git a/target/linux/mediatek/patches-5.4/999-2728-xfrm-extend-packet-mode-to-support-esp-tunnel-mode.patch b/target/linux/mediatek/patches-5.4/999-2728-xfrm-extend-packet-mode-to-support-esp-tunnel-mode.patch
new file mode 100644
index 0000000..ba9bc98
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-2728-xfrm-extend-packet-mode-to-support-esp-tunnel-mode.patch
@@ -0,0 +1,28 @@
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -410,7 +410,7 @@ static int xfrm_output_one(struct sk_buf
+ 	struct xfrm_state *x = dst->xfrm;
+ 	struct net *net = xs_net(x);
+ 
+-	if (err <= 0 || x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
++	if (err <= 0)
+ 		goto resume;
+ 
+ 	do {
+@@ -568,16 +568,6 @@ int xfrm_output(struct sock *sk, struct
+ 	struct xfrm_state *x = skb_dst(skb)->xfrm;
+ 	int err;
+ 
+-	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+-		if (!xfrm_dev_offload_ok(skb, x)) {
+-			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+-			kfree_skb(skb);
+-			return -EHOSTUNREACH;
+-		}
+-
+-		return xfrm_output_resume(skb, 0);
+-	}
+-
+ 	secpath_reset(skb);
+ 
+ 	if (xfrm_dev_offload_ok(skb, x)) {
diff --git a/target/linux/mediatek/patches-5.4/999-4103-mtk-crypto-esp-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4103-mtk-crypto-esp-offload-support.patch
new file mode 100644
index 0000000..a77ef85
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-4103-mtk-crypto-esp-offload-support.patch
@@ -0,0 +1,168 @@
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2316,6 +2316,7 @@ static int mtk_poll_rx(struct napi_struc
+ 
+ 		skb_hnat_alg(skb) = 0;
+ 		skb_hnat_filled(skb) = 0;
++		skb_hnat_set_cdrt(skb, 0);
+ 		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+ 		skb_hnat_set_tops(skb, 0);
+ 		skb_hnat_set_is_decap(skb, 0);
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+@@ -1075,6 +1075,9 @@ static unsigned int hnat_ipv4_get_nextho
+ 		return 0;
+ 	}
+ 
++	if (!skb_hnat_cdrt(skb) && dst && dst_xfrm(dst))
++		return 0;
++
+ 	rcu_read_lock_bh();
+ 	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
+ 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
+@@ -1096,7 +1099,7 @@ static unsigned int hnat_ipv4_get_nextho
+ 	 * outer header, we must update its outer mac header pointer
+ 	 * before filling outer mac or it may screw up inner mac
+ 	 */
+-	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
++	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) || skb_hnat_cdrt(skb)) {
+ 		skb_push(skb, sizeof(struct ethhdr));
+ 		skb_reset_mac_header(skb);
+ 	}
+@@ -1104,7 +1107,7 @@ static unsigned int hnat_ipv4_get_nextho
+ 	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
+ 	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
+ 
+-	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
++	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) || skb_hnat_cdrt(skb))
+ 		skb_pull(skb, sizeof(struct ethhdr));
+ 
+ 	rcu_read_unlock_bh();
+@@ -1289,6 +1292,9 @@ static inline void hnat_fill_offload_eng
+ 		 */
+ 		entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
+ 		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
++	} else if (skb_hnat_cdrt(skb)) {
++		entry->ipv4_hnapt.tport_id = NR_EIP197_QDMA_TPORT;
++		entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
+ 	} else {
+ 		return;
+ 	}
+@@ -1334,7 +1340,8 @@ static unsigned int skb_to_hnat_info(str
+ 	if (whnat && is_hnat_pre_filled(foe))
+ 		return 0;
+ 
+-	if (skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)) {
++	if ((skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL))
++	    || (skb_hnat_cdrt(skb) && skb_dst(skb) && !dst_xfrm(skb_dst(skb)))) {
+ 		hnat_get_filled_unbind_entry(skb, &entry);
+ 		goto hnat_entry_bind;
+ 	}
+@@ -1734,7 +1741,8 @@ static unsigned int skb_to_hnat_info(str
+ 	/* Fill Layer2 Info.*/
+ 	entry = ppe_fill_L2_info(eth, entry, hw_path);
+ 
+-	if (skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
++	if ((skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
++	    || (!skb_hnat_cdrt(skb) && skb_dst(skb) && dst_xfrm(skb_dst(skb))))
+ 		goto hnat_entry_skip_bind;
+ 
+ hnat_entry_bind:
+@@ -1938,6 +1946,8 @@ hnat_entry_bind:
+ 
+ #if defined(CONFIG_MEDIATEK_NETSYS_V3)
+ 	hnat_fill_offload_engine_entry(skb, &entry, dev);
++	if (skb_hnat_cdrt(skb))
++		entry = ppe_fill_L2_info(eth, entry, hw_path);
+ #endif
+ 
+ hnat_entry_skip_bind:
+@@ -2215,6 +2225,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
+ 
+ 	skb_hnat_alg(skb) = 0;
+ 	skb_hnat_set_tops(skb, 0);
++	skb_hnat_set_cdrt(skb, 0);
+ 	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+ 
+ 	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
+@@ -2301,7 +2312,8 @@ static unsigned int mtk_hnat_accel_type(
+ 	 * is from local_out which is also filtered in sanity check.
+ 	 */
+ 	dst = skb_dst(skb);
+-	if (dst && dst_xfrm(dst))
++	if (dst && dst_xfrm(dst)
++	    && (!mtk_crypto_offloadable || !mtk_crypto_offloadable(skb)))
+ 		return 0;
+ 
+ 	ct = nf_ct_get(skb, &ctinfo);
+@@ -2993,7 +3005,10 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
+ 	if (iph->protocol == IPPROTO_IPV6) {
+ 		entry->udib1.pkt_type = IPV6_6RD;
+ 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+-	} else if (!skb_hnat_tops(skb)) {
++	} else if (is_magic_tag_valid(skb)
++		   && (skb_hnat_cdrt(skb) || skb_hnat_tops(skb))) {
++		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
++	} else {
+ 		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+ 	}
+ 
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+@@ -46,7 +46,8 @@ struct hnat_desc {
+ 	u32 amsdu : 1;
+ 	u32 tops : 6;
+ 	u32 is_decap : 1;
+-	u32 resv3 : 12;
++	u32 cdrt : 8;
++	u32 resv3 : 4;
+ 	u32 magic_tag_protect : 16;
+ } __packed;
+ #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
+@@ -99,12 +100,16 @@ struct hnat_desc {
+ #define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
+ #define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
+ #define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
++#define skb_hnat_cdrt(skb) (((struct hnat_desc *)((skb)->head))->cdrt)
++#define skb_hnat_set_cdrt(skb, cdrt) ((skb_hnat_cdrt(skb)) = (cdrt))
+ #else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
+ #define skb_hnat_tops(skb) (0)
+ #define skb_hnat_is_decap(skb) (0)
+ #define skb_hnat_is_encap(skb) (0)
+ #define skb_hnat_set_tops(skb, tops)
+ #define skb_hnat_set_is_decap(skb, is_decap)
++#define skb_hnat_cdrt(skb) (0)
++#define skb_hnat_set_cdrt(skb, cdrt)
+ #endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
+ #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
+ #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+@@ -49,6 +49,8 @@ int (*mtk_tnl_decap_offload)(struct sk_b
+ EXPORT_SYMBOL(mtk_tnl_decap_offload);
+ bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
+ EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
++bool (*mtk_crypto_offloadable)(struct sk_buff *skb) = NULL;
++EXPORT_SYMBOL(mtk_crypto_offloadable);
+ 
+ static void hnat_sma_build_entry(struct timer_list *t)
+ {
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+@@ -1087,6 +1087,8 @@ enum FoeIpAct {
+ #define NR_WDMA1_PORT 9
+ #define NR_WDMA2_PORT 13
+ #define NR_GMAC3_PORT 15
++#define NR_EIP197_TPORT 2
++#define NR_EIP197_QDMA_TPORT 3
+ #define NR_TDMA_TPORT 4
+ #define NR_TDMA_QDMA_TPORT 5
+ #define LAN_DEV_NAME hnat_priv->lan
+@@ -1233,6 +1235,7 @@ extern int qos_toggle;
+ extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
+ extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
+ extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
++extern bool (*mtk_crypto_offloadable)(struct sk_buff *skb);
+ 
+ int ext_if_add(struct extdev_entry *ext_entry);
+ int ext_if_del(struct extdev_entry *ext_entry);