[][Add macsec HW offload backport from kernel 5.18]

[Description]
Add macsec HW offload backport from kernel 5.18.

[Release-log]
N/A

Change-Id: I5b143fe620ec4bcae4075d1d85db5e41c8d48717
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/5981730
diff --git a/target/linux/mediatek/patches-5.4/999-1700-macsec-revert-async-support.patch b/target/linux/mediatek/patches-5.4/999-1700-macsec-revert-async-support.patch
deleted file mode 100644
index 3212b6b..0000000
--- a/target/linux/mediatek/patches-5.4/999-1700-macsec-revert-async-support.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From 8b45e5c6b6b419305ef893e1dfdd4c69c020958b Mon Sep 17 00:00:00 2001
-From: Sam Shih <sam.shih@mediatek.com>
-Date: Fri, 2 Jun 2023 13:05:59 +0800
-Subject: [PATCH] 
- [backport-networking-drivers][999-1700-macsec-revert-async-support.patch]
-
----
- drivers/net/macsec.c | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
-index f729f55f6..e3f03c89c 100644
---- a/drivers/net/macsec.c
-+++ b/drivers/net/macsec.c
-@@ -1311,8 +1311,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
- 	struct crypto_aead *tfm;
- 	int ret;
- 
--	/* Pick a sync gcm(aes) cipher to ensure order is preserved. */
--	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
-+	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
- 
- 	if (IS_ERR(tfm))
- 		return tfm;
--- 
-2.34.1
-
diff --git a/target/linux/mediatek/patches-5.4/999-1750-v5.18-net-macsec-get-ready-to-backport-from-5-18.patch b/target/linux/mediatek/patches-5.4/999-1750-v5.18-net-macsec-get-ready-to-backport-from-5-18.patch
new file mode 100644
index 0000000..486e91c
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1750-v5.18-net-macsec-get-ready-to-backport-from-5-18.patch
@@ -0,0 +1,19 @@
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -16,7 +16,6 @@
+ #include <net/genetlink.h>
+ #include <net/sock.h>
+ #include <net/gro_cells.h>
+-#include <linux/if_arp.h>
+ 
+ #include <uapi/linux/if_macsec.h>
+ 
+@@ -3240,8 +3239,6 @@ static int macsec_newlink(struct net *ne
+ 	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+ 	if (!real_dev)
+ 		return -ENODEV;
+-	if (real_dev->type != ARPHRD_ETHER)
+-		return -EINVAL;
+ 
+ 	dev->priv_flags |= IFF_MACSEC;
+ 
diff --git a/target/linux/mediatek/patches-5.4/999-1751-01-v5.18-net-macsec-move-some-definitions-in-a-dedicated-header.patch b/target/linux/mediatek/patches-5.4/999-1751-01-v5.18-net-macsec-move-some-definitions-in-a-dedicated-header.patch
new file mode 100644
index 0000000..204d821
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1751-01-v5.18-net-macsec-move-some-definitions-in-a-dedicated-header.patch
@@ -0,0 +1,406 @@
+From c0e4eadfb8daf2e9557c7450f9b237c08b404419 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:39 +0100
+Subject: net: macsec: move some definitions in a dedicated header
+
+This patch moves some structure, type and identifier definitions into a
+MACsec specific header. This patch does not modify how the MACsec code
+is running and only move things around. This is a preparation for the
+future MACsec hardware offloading support, which will re-use those
+definitions outside macsec.c.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 164 +----------------------------------------------
+ include/net/macsec.h | 177 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 178 insertions(+), 163 deletions(-)
+ create mode 100644 include/net/macsec.h
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index afd8b2a082454..a336eee018f0b 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -16,11 +16,10 @@
+ #include <net/genetlink.h>
+ #include <net/sock.h>
+ #include <net/gro_cells.h>
++#include <net/macsec.h>
+ 
+ #include <uapi/linux/if_macsec.h>
+ 
+-typedef u64 __bitwise sci_t;
+-
+ #define MACSEC_SCI_LEN 8
+ 
+ /* SecTAG length = macsec_eth_header without the optional SCI */
+@@ -58,8 +57,6 @@ struct macsec_eth_header {
+ #define GCM_AES_IV_LEN 12
+ #define DEFAULT_ICV_LEN 16
+ 
+-#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+-
+ #define for_each_rxsc(secy, sc)				\
+ 	for (sc = rcu_dereference_bh(secy->rx_sc);	\
+ 	     sc;					\
+@@ -77,49 +74,6 @@ struct gcm_iv {
+ 	__be32 pn;
+ };
+ 
+-/**
+- * struct macsec_key - SA key
+- * @id: user-provided key identifier
+- * @tfm: crypto struct, key storage
+- */
+-struct macsec_key {
+-	u8 id[MACSEC_KEYID_LEN];
+-	struct crypto_aead *tfm;
+-};
+-
+-struct macsec_rx_sc_stats {
+-	__u64 InOctetsValidated;
+-	__u64 InOctetsDecrypted;
+-	__u64 InPktsUnchecked;
+-	__u64 InPktsDelayed;
+-	__u64 InPktsOK;
+-	__u64 InPktsInvalid;
+-	__u64 InPktsLate;
+-	__u64 InPktsNotValid;
+-	__u64 InPktsNotUsingSA;
+-	__u64 InPktsUnusedSA;
+-};
+-
+-struct macsec_rx_sa_stats {
+-	__u32 InPktsOK;
+-	__u32 InPktsInvalid;
+-	__u32 InPktsNotValid;
+-	__u32 InPktsNotUsingSA;
+-	__u32 InPktsUnusedSA;
+-};
+-
+-struct macsec_tx_sa_stats {
+-	__u32 OutPktsProtected;
+-	__u32 OutPktsEncrypted;
+-};
+-
+-struct macsec_tx_sc_stats {
+-	__u64 OutPktsProtected;
+-	__u64 OutPktsEncrypted;
+-	__u64 OutOctetsProtected;
+-	__u64 OutOctetsEncrypted;
+-};
+-
+ struct macsec_dev_stats {
+ 	__u64 OutPktsUntagged;
+ 	__u64 InPktsUntagged;
+@@ -131,124 +85,8 @@ struct macsec_dev_stats {
+ 	__u64 InPktsOverrun;
+ };
+ 
+-/**
+- * struct macsec_rx_sa - receive secure association
+- * @active:
+- * @next_pn: packet number expected for the next packet
+- * @lock: protects next_pn manipulations
+- * @key: key structure
+- * @stats: per-SA stats
+- */
+-struct macsec_rx_sa {
+-	struct macsec_key key;
+-	spinlock_t lock;
+-	u32 next_pn;
+-	refcount_t refcnt;
+-	bool active;
+-	struct macsec_rx_sa_stats __percpu *stats;
+-	struct macsec_rx_sc *sc;
+-	struct rcu_head rcu;
+-};
+-
+-struct pcpu_rx_sc_stats {
+-	struct macsec_rx_sc_stats stats;
+-	struct u64_stats_sync syncp;
+-};
+-
+-/**
+- * struct macsec_rx_sc - receive secure channel
+- * @sci: secure channel identifier for this SC
+- * @active: channel is active
+- * @sa: array of secure associations
+- * @stats: per-SC stats
+- */
+-struct macsec_rx_sc {
+-	struct macsec_rx_sc __rcu *next;
+-	sci_t sci;
+-	bool active;
+-	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
+-	struct pcpu_rx_sc_stats __percpu *stats;
+-	refcount_t refcnt;
+-	struct rcu_head rcu_head;
+-};
+-
+-/**
+- * struct macsec_tx_sa - transmit secure association
+- * @active:
+- * @next_pn: packet number to use for the next packet
+- * @lock: protects next_pn manipulations
+- * @key: key structure
+- * @stats: per-SA stats
+- */
+-struct macsec_tx_sa {
+-	struct macsec_key key;
+-	spinlock_t lock;
+-	u32 next_pn;
+-	refcount_t refcnt;
+-	bool active;
+-	struct macsec_tx_sa_stats __percpu *stats;
+-	struct rcu_head rcu;
+-};
+-
+-struct pcpu_tx_sc_stats {
+-	struct macsec_tx_sc_stats stats;
+-	struct u64_stats_sync syncp;
+-};
+-
+-/**
+- * struct macsec_tx_sc - transmit secure channel
+- * @active:
+- * @encoding_sa: association number of the SA currently in use
+- * @encrypt: encrypt packets on transmit, or authenticate only
+- * @send_sci: always include the SCI in the SecTAG
+- * @end_station:
+- * @scb: single copy broadcast flag
+- * @sa: array of secure associations
+- * @stats: stats for this TXSC
+- */
+-struct macsec_tx_sc {
+-	bool active;
+-	u8 encoding_sa;
+-	bool encrypt;
+-	bool send_sci;
+-	bool end_station;
+-	bool scb;
+-	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
+-	struct pcpu_tx_sc_stats __percpu *stats;
+-};
+-
+ #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
+ 
+-/**
+- * struct macsec_secy - MACsec Security Entity
+- * @netdev: netdevice for this SecY
+- * @n_rx_sc: number of receive secure channels configured on this SecY
+- * @sci: secure channel identifier used for tx
+- * @key_len: length of keys used by the cipher suite
+- * @icv_len: length of ICV used by the cipher suite
+- * @validate_frames: validation mode
+- * @operational: MAC_Operational flag
+- * @protect_frames: enable protection for this SecY
+- * @replay_protect: enable packet number checks on receive
+- * @replay_window: size of the replay window
+- * @tx_sc: transmit secure channel
+- * @rx_sc: linked list of receive secure channels
+- */
+-struct macsec_secy {
+-	struct net_device *netdev;
+-	unsigned int n_rx_sc;
+-	sci_t sci;
+-	u16 key_len;
+-	u16 icv_len;
+-	enum macsec_validation_type validate_frames;
+-	bool operational;
+-	bool protect_frames;
+-	bool replay_protect;
+-	u32 replay_window;
+-	struct macsec_tx_sc tx_sc;
+-	struct macsec_rx_sc __rcu *rx_sc;
+-};
+-
+ struct pcpu_secy_stats {
+ 	struct macsec_dev_stats stats;
+ 	struct u64_stats_sync syncp;
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+new file mode 100644
+index 0000000000000..e7b41c1043f6f
+--- /dev/null
++++ b/include/net/macsec.h
+@@ -0,0 +1,177 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * MACsec netdev header, used for h/w accelerated implementations.
++ *
++ * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
++ */
++#ifndef _NET_MACSEC_H_
++#define _NET_MACSEC_H_
++
++#include <linux/u64_stats_sync.h>
++#include <uapi/linux/if_link.h>
++#include <uapi/linux/if_macsec.h>
++
++typedef u64 __bitwise sci_t;
++
++#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
++
++/**
++ * struct macsec_key - SA key
++ * @id: user-provided key identifier
++ * @tfm: crypto struct, key storage
++ */
++struct macsec_key {
++	u8 id[MACSEC_KEYID_LEN];
++	struct crypto_aead *tfm;
++};
++
++struct macsec_rx_sc_stats {
++	__u64 InOctetsValidated;
++	__u64 InOctetsDecrypted;
++	__u64 InPktsUnchecked;
++	__u64 InPktsDelayed;
++	__u64 InPktsOK;
++	__u64 InPktsInvalid;
++	__u64 InPktsLate;
++	__u64 InPktsNotValid;
++	__u64 InPktsNotUsingSA;
++	__u64 InPktsUnusedSA;
++};
++
++struct macsec_rx_sa_stats {
++	__u32 InPktsOK;
++	__u32 InPktsInvalid;
++	__u32 InPktsNotValid;
++	__u32 InPktsNotUsingSA;
++	__u32 InPktsUnusedSA;
++};
++
++struct macsec_tx_sa_stats {
++	__u32 OutPktsProtected;
++	__u32 OutPktsEncrypted;
++};
++
++struct macsec_tx_sc_stats {
++	__u64 OutPktsProtected;
++	__u64 OutPktsEncrypted;
++	__u64 OutOctetsProtected;
++	__u64 OutOctetsEncrypted;
++};
++
++/**
++ * struct macsec_rx_sa - receive secure association
++ * @active:
++ * @next_pn: packet number expected for the next packet
++ * @lock: protects next_pn manipulations
++ * @key: key structure
++ * @stats: per-SA stats
++ */
++struct macsec_rx_sa {
++	struct macsec_key key;
++	spinlock_t lock;
++	u32 next_pn;
++	refcount_t refcnt;
++	bool active;
++	struct macsec_rx_sa_stats __percpu *stats;
++	struct macsec_rx_sc *sc;
++	struct rcu_head rcu;
++};
++
++struct pcpu_rx_sc_stats {
++	struct macsec_rx_sc_stats stats;
++	struct u64_stats_sync syncp;
++};
++
++struct pcpu_tx_sc_stats {
++	struct macsec_tx_sc_stats stats;
++	struct u64_stats_sync syncp;
++};
++
++/**
++ * struct macsec_rx_sc - receive secure channel
++ * @sci: secure channel identifier for this SC
++ * @active: channel is active
++ * @sa: array of secure associations
++ * @stats: per-SC stats
++ */
++struct macsec_rx_sc {
++	struct macsec_rx_sc __rcu *next;
++	sci_t sci;
++	bool active;
++	struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
++	struct pcpu_rx_sc_stats __percpu *stats;
++	refcount_t refcnt;
++	struct rcu_head rcu_head;
++};
++
++/**
++ * struct macsec_tx_sa - transmit secure association
++ * @active:
++ * @next_pn: packet number to use for the next packet
++ * @lock: protects next_pn manipulations
++ * @key: key structure
++ * @stats: per-SA stats
++ */
++struct macsec_tx_sa {
++	struct macsec_key key;
++	spinlock_t lock;
++	u32 next_pn;
++	refcount_t refcnt;
++	bool active;
++	struct macsec_tx_sa_stats __percpu *stats;
++	struct rcu_head rcu;
++};
++
++/**
++ * struct macsec_tx_sc - transmit secure channel
++ * @active:
++ * @encoding_sa: association number of the SA currently in use
++ * @encrypt: encrypt packets on transmit, or authenticate only
++ * @send_sci: always include the SCI in the SecTAG
++ * @end_station:
++ * @scb: single copy broadcast flag
++ * @sa: array of secure associations
++ * @stats: stats for this TXSC
++ */
++struct macsec_tx_sc {
++	bool active;
++	u8 encoding_sa;
++	bool encrypt;
++	bool send_sci;
++	bool end_station;
++	bool scb;
++	struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
++	struct pcpu_tx_sc_stats __percpu *stats;
++};
++
++/**
++ * struct macsec_secy - MACsec Security Entity
++ * @netdev: netdevice for this SecY
++ * @n_rx_sc: number of receive secure channels configured on this SecY
++ * @sci: secure channel identifier used for tx
++ * @key_len: length of keys used by the cipher suite
++ * @icv_len: length of ICV used by the cipher suite
++ * @validate_frames: validation mode
++ * @operational: MAC_Operational flag
++ * @protect_frames: enable protection for this SecY
++ * @replay_protect: enable packet number checks on receive
++ * @replay_window: size of the replay window
++ * @tx_sc: transmit secure channel
++ * @rx_sc: linked list of receive secure channels
++ */
++struct macsec_secy {
++	struct net_device *netdev;
++	unsigned int n_rx_sc;
++	sci_t sci;
++	u16 key_len;
++	u16 icv_len;
++	enum macsec_validation_type validate_frames;
++	bool operational;
++	bool protect_frames;
++	bool replay_protect;
++	u32 replay_window;
++	struct macsec_tx_sc tx_sc;
++	struct macsec_rx_sc __rcu *rx_sc;
++};
++
++#endif /* _NET_MACSEC_H_ */
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1752-02-v5.18-net-macsec-introduce-the-macsec_context-structure.patch b/target/linux/mediatek/patches-5.4/999-1752-02-v5.18-net-macsec-introduce-the-macsec_context-structure.patch
new file mode 100644
index 0000000..40c1149
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1752-02-v5.18-net-macsec-introduce-the-macsec_context-structure.patch
@@ -0,0 +1,103 @@
+From 76564261a7db80c5f5c624e0122a28787f266bdf Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:40 +0100
+Subject: net: macsec: introduce the macsec_context structure
+
+This patch introduces the macsec_context structure. It will be used
+in the kernel to exchange information between the common MACsec
+implementation (macsec.c) and the MACsec hardware offloading
+implementations. This structure contains pointers to MACsec specific
+structures which contain the actual MACsec configuration, and to the
+underlying device (phydev for now).
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/linux/phy.h                |  2 ++
+ include/net/macsec.h               | 21 +++++++++++++++++++++
+ include/uapi/linux/if_link.h       |  7 +++++++
+ tools/include/uapi/linux/if_link.h |  7 +++++++
+ 4 files changed, 37 insertions(+)
+
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 3a70b756ac1aa..be079a7bb40aa 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -332,6 +332,8 @@ struct phy_c45_device_ids {
+ 	u32 device_ids[8];
+ };
+ 
++struct macsec_context;
++
+ /* phy_device: An instance of a PHY
+  *
+  * drv: Pointer to the driver for this PHY instance
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index e7b41c1043f6f..0b98803f92ec1 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -174,4 +174,25 @@ struct macsec_secy {
+ 	struct macsec_rx_sc __rcu *rx_sc;
+ };
+ 
++/**
++ * struct macsec_context - MACsec context for hardware offloading
++ */
++struct macsec_context {
++	struct phy_device *phydev;
++	enum macsec_offload offload;
++
++	struct macsec_secy *secy;
++	struct macsec_rx_sc *rx_sc;
++	struct {
++		unsigned char assoc_num;
++		u8 key[MACSEC_KEYID_LEN];
++		union {
++			struct macsec_rx_sa *rx_sa;
++			struct macsec_tx_sa *tx_sa;
++		};
++	} sa;
++
++	u8 prepare:1;
++};
++
+ #endif /* _NET_MACSEC_H_ */
+diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
+index 1d69f637c5d6a..024af2d1d0af4 100644
+--- a/include/uapi/linux/if_link.h
++++ b/include/uapi/linux/if_link.h
+@@ -486,6 +486,13 @@ enum macsec_validation_type {
+ 	MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1,
+ };
+ 
++enum macsec_offload {
++	MACSEC_OFFLOAD_OFF = 0,
++	MACSEC_OFFLOAD_PHY = 1,
++	__MACSEC_OFFLOAD_END,
++	MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
++};
++
+ /* IPVLAN section */
+ enum {
+ 	IFLA_IPVLAN_UNSPEC,
+diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
+index 8aec8769d9442..42efdb84d1898 100644
+--- a/tools/include/uapi/linux/if_link.h
++++ b/tools/include/uapi/linux/if_link.h
+@@ -485,6 +485,13 @@ enum macsec_validation_type {
+ 	MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1,
+ };
+ 
++enum macsec_offload {
++	MACSEC_OFFLOAD_OFF = 0,
++	MACSEC_OFFLOAD_PHY = 1,
++	__MACSEC_OFFLOAD_END,
++	MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
++};
++
+ /* IPVLAN section */
+ enum {
+ 	IFLA_IPVLAN_UNSPEC,
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1753-03-v5.18-net-macsec-introduce-MACsec-ops.patch b/target/linux/mediatek/patches-5.4/999-1753-03-v5.18-net-macsec-introduce-MACsec-ops.patch
new file mode 100644
index 0000000..1681b74
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1753-03-v5.18-net-macsec-introduce-MACsec-ops.patch
@@ -0,0 +1,50 @@
+From 0830e20b62ad156f7df5ff5b9c4cea280ebe8fef Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:41 +0100
+Subject: net: macsec: introduce MACsec ops
+
+This patch introduces MACsec ops for drivers to support offloading
+MACsec operations.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/net/macsec.h | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 0b98803f92ec1..16e7e5061178e 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -195,4 +195,28 @@ struct macsec_context {
+ 	u8 prepare:1;
+ };
+ 
++/**
++ * struct macsec_ops - MACsec offloading operations
++ */
++struct macsec_ops {
++	/* Device wide */
++	int (*mdo_dev_open)(struct macsec_context *ctx);
++	int (*mdo_dev_stop)(struct macsec_context *ctx);
++	/* SecY */
++	int (*mdo_add_secy)(struct macsec_context *ctx);
++	int (*mdo_upd_secy)(struct macsec_context *ctx);
++	int (*mdo_del_secy)(struct macsec_context *ctx);
++	/* Security channels */
++	int (*mdo_add_rxsc)(struct macsec_context *ctx);
++	int (*mdo_upd_rxsc)(struct macsec_context *ctx);
++	int (*mdo_del_rxsc)(struct macsec_context *ctx);
++	/* Security associations */
++	int (*mdo_add_rxsa)(struct macsec_context *ctx);
++	int (*mdo_upd_rxsa)(struct macsec_context *ctx);
++	int (*mdo_del_rxsa)(struct macsec_context *ctx);
++	int (*mdo_add_txsa)(struct macsec_context *ctx);
++	int (*mdo_upd_txsa)(struct macsec_context *ctx);
++	int (*mdo_del_txsa)(struct macsec_context *ctx);
++};
++
+ #endif /* _NET_MACSEC_H_ */
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1754-04-v5.18-net-phy-add-MACsec-ops-in-phy_device.patch b/target/linux/mediatek/patches-5.4/999-1754-04-v5.18-net-phy-add-MACsec-ops-in-phy_device.patch
new file mode 100644
index 0000000..dff64b3
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1754-04-v5.18-net-phy-add-MACsec-ops-in-phy_device.patch
@@ -0,0 +1,50 @@
+From 2e18135845b359f26c37df38ba56565496517c10 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:42 +0100
+Subject: net: phy: add MACsec ops in phy_device
+
+This patch adds a reference to MACsec ops in the phy_device, to allow
+PHYs to support offloading MACsec operations. The phydev lock will be
+held while calling those helpers.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/linux/phy.h | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index be079a7bb40aa..2929d0bc307fe 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -333,6 +333,7 @@ struct phy_c45_device_ids {
+ };
+ 
+ struct macsec_context;
++struct macsec_ops;
+ 
+ /* phy_device: An instance of a PHY
+  *
+@@ -356,6 +357,7 @@ struct macsec_context;
+  * attached_dev: The attached enet driver's device instance ptr
+  * adjust_link: Callback for the enet controller to respond to
+  * changes in the link state.
++ * macsec_ops: MACsec offloading ops.
+  *
+  * speed, duplex, pause, supported, advertising, lp_advertising,
+  * and autoneg are used like in mii_if_info
+@@ -455,6 +457,11 @@ struct phy_device {
+ 
+ 	void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier);
+ 	void (*adjust_link)(struct net_device *dev);
++
++#if IS_ENABLED(CONFIG_MACSEC)
++	/* MACsec management functions */
++	const struct macsec_ops *macsec_ops;
++#endif
+ };
+ #define to_phy_device(d) container_of(to_mdio_device(d), \
+ 				      struct phy_device, mdio)
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1755-05-v5.18-net-macsec-hardware-offloading-infrastructure.patch b/target/linux/mediatek/patches-5.4/999-1755-05-v5.18-net-macsec-hardware-offloading-infrastructure.patch
new file mode 100644
index 0000000..1404301
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1755-05-v5.18-net-macsec-hardware-offloading-infrastructure.patch
@@ -0,0 +1,825 @@
+From 3cf3227a21d1fb020fe26128e60321bd2151e922 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:43 +0100
+Subject: net: macsec: hardware offloading infrastructure
+
+This patch introduces the MACsec hardware offloading infrastructure.
+
+The main idea here is to re-use the logic and data structures of the
+software MACsec implementation. This allows not to duplicate definitions
+and structure storing the same kind of information. It also allows to
+use a unified genlink interface for both MACsec implementations (so that
+the same userspace tool, `ip macsec`, is used with the same arguments).
+The MACsec offloading support cannot be disabled if an interface
+supports it at the moment.
+
+The MACsec configuration is passed to device drivers supporting it
+through macsec_ops which are called from the MACsec genl helpers. Those
+functions call the macsec ops of PHY and Ethernet drivers in two steps:
+a preparation one, and a commit one. The first step is allowed to fail
+and should be used to check if a provided configuration is compatible
+with the features provided by a MACsec engine, while the second step is
+not allowed to fail and should only be used to enable a given MACsec
+configuration. Two extra calls are made: when a virtual MACsec interface
+is created and when it is deleted, so that the hardware driver can stay
+in sync.
+
+The Rx and TX handlers are modified to take into account the special case
+where the MACsec transformation happens in the hardware, whether in a PHY
+or in a MAC, as the packets seen by the networking stack on both the
+physical and MACsec virtual interface are exactly the same. This leads
+to some limitations: the hardware and software implementations can't be
+used on the same physical interface, as the policies would be impossible
+to fulfill (such as strict validation of the frames). Also only a single
+virtual MACsec interface can be offloaded to a physical port supporting
+hardware offloading as it would be impossible to guess onto which
+interface a given packet should go (for ingress traffic).
+
+Another limitation as of now is that the counters and statistics are not
+reported back from the hardware to the software MACsec implementation.
+This isn't an issue when using offloaded MACsec transformations, but it
+should be added in the future so that the MACsec state can be reported
+to the user (which would also improve the debug).
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 453 +++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 441 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index a336eee018f0b..36b0416381bf1 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -11,12 +11,14 @@
+ #include <linux/module.h>
+ #include <crypto/aead.h>
+ #include <linux/etherdevice.h>
++#include <linux/netdevice.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/refcount.h>
+ #include <net/genetlink.h>
+ #include <net/sock.h>
+ #include <net/gro_cells.h>
+ #include <net/macsec.h>
++#include <linux/phy.h>
+ 
+ #include <uapi/linux/if_macsec.h>
+ 
+@@ -98,6 +100,7 @@ struct pcpu_secy_stats {
+  * @real_dev: pointer to underlying netdevice
+  * @stats: MACsec device stats
+  * @secys: linked list of SecY's on the underlying device
++ * @offload: status of offloading on the MACsec device
+  */
+ struct macsec_dev {
+ 	struct macsec_secy secy;
+@@ -105,6 +108,7 @@ struct macsec_dev {
+ 	struct pcpu_secy_stats __percpu *stats;
+ 	struct list_head secys;
+ 	struct gro_cells gro_cells;
++	enum macsec_offload offload;
+ };
+ 
+ /**
+@@ -318,6 +322,56 @@ static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
+ 		h->short_length = data_len;
+ }
+ 
++/* Checks if a MACsec interface is being offloaded to an hardware engine */
++static bool macsec_is_offloaded(struct macsec_dev *macsec)
++{
++	if (macsec->offload == MACSEC_OFFLOAD_PHY)
++		return true;
++
++	return false;
++}
++
++/* Checks if underlying layers implement MACsec offloading functions. */
++static bool macsec_check_offload(enum macsec_offload offload,
++				 struct macsec_dev *macsec)
++{
++	if (!macsec || !macsec->real_dev)
++		return false;
++
++	if (offload == MACSEC_OFFLOAD_PHY)
++		return macsec->real_dev->phydev &&
++		       macsec->real_dev->phydev->macsec_ops;
++
++	return false;
++}
++
++static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
++						 struct macsec_dev *macsec,
++						 struct macsec_context *ctx)
++{
++	if (ctx) {
++		memset(ctx, 0, sizeof(*ctx));
++		ctx->offload = offload;
++
++		if (offload == MACSEC_OFFLOAD_PHY)
++			ctx->phydev = macsec->real_dev->phydev;
++	}
++
++	return macsec->real_dev->phydev->macsec_ops;
++}
++
++/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
++ * context device reference if provided.
++ */
++static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
++					       struct macsec_context *ctx)
++{
++	if (!macsec_check_offload(macsec->offload, macsec))
++		return NULL;
++
++	return __macsec_get_ops(macsec->offload, macsec, ctx);
++}
++
+ /* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
+ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+ {
+@@ -867,8 +921,10 @@ static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
+ 	return NULL;
+ }
+ 
+-static void handle_not_macsec(struct sk_buff *skb)
++static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ {
++	/* Deliver to the uncontrolled port by default */
++	enum rx_handler_result ret = RX_HANDLER_PASS;
+ 	struct macsec_rxh_data *rxd;
+ 	struct macsec_dev *macsec;
+ 
+@@ -883,7 +939,8 @@ static void handle_not_macsec(struct sk_buff *skb)
+ 		struct sk_buff *nskb;
+ 		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
+ 
+-		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
++		if (!macsec_is_offloaded(macsec) &&
++		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+ 			u64_stats_update_begin(&secy_stats->syncp);
+ 			secy_stats->stats.InPktsNoTag++;
+ 			u64_stats_update_end(&secy_stats->syncp);
+@@ -902,9 +959,17 @@ static void handle_not_macsec(struct sk_buff *skb)
+ 			secy_stats->stats.InPktsUntagged++;
+ 			u64_stats_update_end(&secy_stats->syncp);
+ 		}
++
++		if (netif_running(macsec->secy.netdev) &&
++		    macsec_is_offloaded(macsec)) {
++			ret = RX_HANDLER_EXACT;
++			goto out;
++		}
+ 	}
+ 
++out:
+ 	rcu_read_unlock();
++	return ret;
+ }
+ 
+ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+@@ -929,12 +994,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 		goto drop_direct;
+ 
+ 	hdr = macsec_ethhdr(skb);
+-	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
+-		handle_not_macsec(skb);
+-
+-		/* and deliver to the uncontrolled port */
+-		return RX_HANDLER_PASS;
+-	}
++	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
++		return handle_not_macsec(skb);
+ 
+ 	skb = skb_unshare(skb, GFP_ATOMIC);
+ 	*pskb = skb;
+@@ -1440,6 +1501,40 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
+ 				 .len = MACSEC_MAX_KEY_LEN, },
+ };
+ 
++/* Offloads an operation to a device driver */
++static int macsec_offload(int (* const func)(struct macsec_context *),
++			  struct macsec_context *ctx)
++{
++	int ret;
++
++	if (unlikely(!func))
++		return 0;
++
++	if (ctx->offload == MACSEC_OFFLOAD_PHY)
++		mutex_lock(&ctx->phydev->lock);
++
++	/* Phase I: prepare. The driver should fail here if there are going to be
++	 * issues in the commit phase.
++	 */
++	ctx->prepare = true;
++	ret = (*func)(ctx);
++	if (ret)
++		goto phy_unlock;
++
++	/* Phase II: commit. This step cannot fail. */
++	ctx->prepare = false;
++	ret = (*func)(ctx);
++	/* This should never happen: commit is not allowed to fail */
++	if (unlikely(ret))
++		WARN(1, "MACsec offloading commit failed (%d)\n", ret);
++
++phy_unlock:
++	if (ctx->offload == MACSEC_OFFLOAD_PHY)
++		mutex_unlock(&ctx->phydev->lock);
++
++	return ret;
++}
++
+ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
+ {
+ 	if (!attrs[MACSEC_ATTR_SA_CONFIG])
+@@ -1555,13 +1650,40 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+ 		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ 
+-	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
+ 	rx_sa->sc = rx_sc;
++
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			err = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.sa.assoc_num = assoc_num;
++		ctx.sa.rx_sa = rx_sa;
++		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
++		       MACSEC_KEYID_LEN);
++
++		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
++		if (err)
++			goto cleanup;
++	}
++
++	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
+ 	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
+ 
+ 	rtnl_unlock();
+ 
+ 	return 0;
++
++cleanup:
++	kfree(rx_sa);
++	rtnl_unlock();
++	return err;
+ }
+ 
+ static bool validate_add_rxsc(struct nlattr **attrs)
+@@ -1584,6 +1706,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 	struct nlattr **attrs = info->attrs;
+ 	struct macsec_rx_sc *rx_sc;
+ 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
++	bool was_active;
++	int ret;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+@@ -1609,12 +1733,35 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 		return PTR_ERR(rx_sc);
+ 	}
+ 
++	was_active = rx_sc->active;
+ 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
+ 		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+ 
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			ret = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.rx_sc = rx_sc;
++
++		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
++		if (ret)
++			goto cleanup;
++	}
++
+ 	rtnl_unlock();
+ 
+ 	return 0;
++
++cleanup:
++	rx_sc->active = was_active;
++	rtnl_unlock();
++	return ret;
+ }
+ 
+ static bool validate_add_txsa(struct nlattr **attrs)
+@@ -1651,6 +1798,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	struct macsec_tx_sa *tx_sa;
+ 	unsigned char assoc_num;
+ 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
++	bool was_operational;
+ 	int err;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+@@ -1701,8 +1849,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 		return err;
+ 	}
+ 
+-	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
+-
+ 	spin_lock_bh(&tx_sa->lock);
+ 	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ 	spin_unlock_bh(&tx_sa->lock);
+@@ -1710,14 +1856,43 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+ 		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ 
++	was_operational = secy->operational;
+ 	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
+ 		secy->operational = true;
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			err = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.sa.assoc_num = assoc_num;
++		ctx.sa.tx_sa = tx_sa;
++		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
++		       secy->key_len);
++
++		err = macsec_offload(ops->mdo_add_txsa, &ctx);
++		if (err)
++			goto cleanup;
++	}
++
++	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
+ 	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
+ 
+ 	rtnl_unlock();
+ 
+ 	return 0;
++
++cleanup:
++	secy->operational = was_operational;
++	kfree(tx_sa);
++	rtnl_unlock();
++	return err;
+ }
+ 
+ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
+@@ -1730,6 +1905,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 	u8 assoc_num;
+ 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
++	int ret;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+@@ -1753,12 +1929,35 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 		return -EBUSY;
+ 	}
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			ret = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.sa.assoc_num = assoc_num;
++		ctx.sa.rx_sa = rx_sa;
++
++		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
++		if (ret)
++			goto cleanup;
++	}
++
+ 	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
+ 	clear_rx_sa(rx_sa);
+ 
+ 	rtnl_unlock();
+ 
+ 	return 0;
++
++cleanup:
++	rtnl_unlock();
++	return ret;
+ }
+ 
+ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
+@@ -1769,6 +1968,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 	struct macsec_rx_sc *rx_sc;
+ 	sci_t sci;
+ 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
++	int ret;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+@@ -1795,10 +1995,31 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 		return -ENODEV;
+ 	}
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			ret = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.rx_sc = rx_sc;
++		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
++		if (ret)
++			goto cleanup;
++	}
++
+ 	free_rx_sc(rx_sc);
+ 	rtnl_unlock();
+ 
+ 	return 0;
++
++cleanup:
++	rtnl_unlock();
++	return ret;
+ }
+ 
+ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
+@@ -1810,6 +2031,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	struct macsec_tx_sa *tx_sa;
+ 	u8 assoc_num;
+ 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
++	int ret;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+@@ -1830,12 +2052,35 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
+ 		return -EBUSY;
+ 	}
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			ret = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.sa.assoc_num = assoc_num;
++		ctx.sa.tx_sa = tx_sa;
++
++		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
++		if (ret)
++			goto cleanup;
++	}
++
+ 	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
+ 	clear_tx_sa(tx_sa);
+ 
+ 	rtnl_unlock();
+ 
+ 	return 0;
++
++cleanup:
++	rtnl_unlock();
++	return ret;
+ }
+ 
+ static bool validate_upd_sa(struct nlattr **attrs)
+@@ -1868,6 +2113,9 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	struct macsec_tx_sa *tx_sa;
+ 	u8 assoc_num;
+ 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
++	bool was_operational, was_active;
++	u32 prev_pn = 0;
++	int ret = 0;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+@@ -1888,19 +2136,52 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ 		spin_lock_bh(&tx_sa->lock);
++		prev_pn = tx_sa->next_pn;
+ 		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&tx_sa->lock);
+ 	}
+ 
++	was_active = tx_sa->active;
+ 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+ 		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ 
++	was_operational = secy->operational;
+ 	if (assoc_num == tx_sc->encoding_sa)
+ 		secy->operational = tx_sa->active;
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			ret = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.sa.assoc_num = assoc_num;
++		ctx.sa.tx_sa = tx_sa;
++
++		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
++		if (ret)
++			goto cleanup;
++	}
++
+ 	rtnl_unlock();
+ 
+ 	return 0;
++
++cleanup:
++	if (tb_sa[MACSEC_SA_ATTR_PN]) {
++		spin_lock_bh(&tx_sa->lock);
++		tx_sa->next_pn = prev_pn;
++		spin_unlock_bh(&tx_sa->lock);
++	}
++	tx_sa->active = was_active;
++	secy->operational = was_operational;
++	rtnl_unlock();
++	return ret;
+ }
+ 
+ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+@@ -1913,6 +2194,9 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 	u8 assoc_num;
+ 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
++	bool was_active;
++	u32 prev_pn = 0;
++	int ret = 0;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+@@ -1936,15 +2220,46 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ 		spin_lock_bh(&rx_sa->lock);
++		prev_pn = rx_sa->next_pn;
+ 		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&rx_sa->lock);
+ 	}
+ 
++	was_active = rx_sa->active;
+ 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+ 		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			ret = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.sa.assoc_num = assoc_num;
++		ctx.sa.rx_sa = rx_sa;
++
++		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
++		if (ret)
++			goto cleanup;
++	}
++
+ 	rtnl_unlock();
+ 	return 0;
++
++cleanup:
++	if (tb_sa[MACSEC_SA_ATTR_PN]) {
++		spin_lock_bh(&rx_sa->lock);
++		rx_sa->next_pn = prev_pn;
++		spin_unlock_bh(&rx_sa->lock);
++	}
++	rx_sa->active = was_active;
++	rtnl_unlock();
++	return ret;
+ }
+ 
+ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
+@@ -1954,6 +2269,9 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 	struct macsec_secy *secy;
+ 	struct macsec_rx_sc *rx_sc;
+ 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
++	unsigned int prev_n_rx_sc;
++	bool was_active;
++	int ret;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+@@ -1971,6 +2289,8 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 		return PTR_ERR(rx_sc);
+ 	}
+ 
++	was_active = rx_sc->active;
++	prev_n_rx_sc = secy->n_rx_sc;
+ 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
+ 		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
+ 
+@@ -1980,9 +2300,33 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 		rx_sc->active = new;
+ 	}
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			ret = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.rx_sc = rx_sc;
++
++		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
++		if (ret)
++			goto cleanup;
++	}
++
+ 	rtnl_unlock();
+ 
+ 	return 0;
++
++cleanup:
++	secy->n_rx_sc = prev_n_rx_sc;
++	rx_sc->active = was_active;
++	rtnl_unlock();
++	return ret;
+ }
+ 
+ static int copy_tx_sa_stats(struct sk_buff *skb,
+@@ -2550,6 +2894,11 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+ 	struct pcpu_secy_stats *secy_stats;
+ 	int ret, len;
+ 
++	if (macsec_is_offloaded(netdev_priv(dev))) {
++		skb->dev = macsec->real_dev;
++		return dev_queue_xmit(skb);
++	}
++
+ 	/* 10.5 */
+ 	if (!secy->protect_frames) {
+ 		secy_stats = this_cpu_ptr(macsec->stats);
+@@ -2663,6 +3012,22 @@ static int macsec_dev_open(struct net_device *dev)
+ 			goto clear_allmulti;
+ 	}
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			err = -EOPNOTSUPP;
++			goto clear_allmulti;
++		}
++
++		err = macsec_offload(ops->mdo_dev_open, &ctx);
++		if (err)
++			goto clear_allmulti;
++	}
++
+ 	if (netif_carrier_ok(real_dev))
+ 		netif_carrier_on(dev);
+ 
+@@ -2683,6 +3048,16 @@ static int macsec_dev_stop(struct net_device *dev)
+ 
+ 	netif_carrier_off(dev);
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(macsec, &ctx);
++		if (ops)
++			macsec_offload(ops->mdo_dev_stop, &ctx);
++	}
++
+ 	dev_mc_unsync(real_dev, dev);
+ 	dev_uc_unsync(real_dev, dev);
+ 
+@@ -2914,6 +3289,11 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
+ 			     struct nlattr *data[],
+ 			     struct netlink_ext_ack *extack)
+ {
++	struct macsec_dev *macsec = macsec_priv(dev);
++	struct macsec_tx_sc tx_sc;
++	struct macsec_secy secy;
++	int ret;
++
+ 	if (!data)
+ 		return 0;
+ 
+@@ -2923,7 +3303,41 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
+ 	    data[IFLA_MACSEC_PORT])
+ 		return -EINVAL;
+ 
+-	return macsec_changelink_common(dev, data);
++	/* Keep a copy of unmodified secy and tx_sc, in case the offload
++	 * propagation fails, to revert macsec_changelink_common.
++	 */
++	memcpy(&secy, &macsec->secy, sizeof(secy));
++	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
++
++	ret = macsec_changelink_common(dev, data);
++	if (ret)
++		return ret;
++
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (!ops) {
++			ret = -EOPNOTSUPP;
++			goto cleanup;
++		}
++
++		ctx.secy = &macsec->secy;
++		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
++		if (ret)
++			goto cleanup;
++	}
++
++	return 0;
++
++cleanup:
++	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
++	memcpy(&macsec->secy, &secy, sizeof(secy));
++
++	return ret;
+ }
+ 
+ static void macsec_del_dev(struct macsec_dev *macsec)
+@@ -2966,6 +3380,18 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
+ 	struct net_device *real_dev = macsec->real_dev;
+ 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (ops) {
++			ctx.secy = &macsec->secy;
++			macsec_offload(ops->mdo_del_secy, &ctx);
++		}
++	}
++
+ 	macsec_common_dellink(dev, head);
+ 
+ 	if (list_empty(&rxd->secys)) {
+@@ -3077,6 +3503,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 
+ 	macsec->real_dev = real_dev;
+ 
++	/* MACsec offloading is off by default */
++	macsec->offload = MACSEC_OFFLOAD_OFF;
++
+ 	if (data && data[IFLA_MACSEC_ICV_LEN])
+ 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
+ 	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1756-06-v5.18-net-macsec-add-nla-support-for-changing-the-offloading-selection.patch b/target/linux/mediatek/patches-5.4/999-1756-06-v5.18-net-macsec-add-nla-support-for-changing-the-offloading-selection.patch
new file mode 100644
index 0000000..849d2cd
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1756-06-v5.18-net-macsec-add-nla-support-for-changing-the-offloading-selection.patch
@@ -0,0 +1,253 @@
+From dcb780fb279514f268826f2e9f4df3bc75610703 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:44 +0100
+Subject: net: macsec: add nla support for changing the offloading selection
+
+MACsec offloading to underlying hardware devices is disabled by default
+(the software implementation is used). This patch adds support for
+changing this setting through the MACsec netlink interface. Many checks
+are done when enabling offloading on a given MACsec interface as there
+are limitations (it must be supported by the hardware, only a single
+interface can be offloaded on a given physical device at a time, rules
+can't be moved for now).
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c           | 145 ++++++++++++++++++++++++++++++++++++++++-
+ include/uapi/linux/if_macsec.h |  11 ++++
+ 2 files changed, 153 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 36b0416381bf1..e515919e8687f 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1484,6 +1484,7 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
+ 	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
+ 	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
+ 	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
++	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
+ };
+ 
+ static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
+@@ -1501,6 +1502,10 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
+ 				 .len = MACSEC_MAX_KEY_LEN, },
+ };
+ 
++static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
++	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
++};
++
+ /* Offloads an operation to a device driver */
+ static int macsec_offload(int (* const func)(struct macsec_context *),
+ 			  struct macsec_context *ctx)
+@@ -2329,6 +2334,126 @@ cleanup:
+ 	return ret;
+ }
+ 
++static bool macsec_is_configured(struct macsec_dev *macsec)
++{
++	struct macsec_secy *secy = &macsec->secy;
++	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
++	int i;
++
++	if (secy->n_rx_sc > 0)
++		return true;
++
++	for (i = 0; i < MACSEC_NUM_AN; i++)
++		if (tx_sc->sa[i])
++			return true;
++
++	return false;
++}
++
++static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
++{
++	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
++	enum macsec_offload offload, prev_offload;
++	int (*func)(struct macsec_context *ctx);
++	struct nlattr **attrs = info->attrs;
++	struct net_device *dev, *loop_dev;
++	const struct macsec_ops *ops;
++	struct macsec_context ctx;
++	struct macsec_dev *macsec;
++	struct net *loop_net;
++	int ret;
++
++	if (!attrs[MACSEC_ATTR_IFINDEX])
++		return -EINVAL;
++
++	if (!attrs[MACSEC_ATTR_OFFLOAD])
++		return -EINVAL;
++
++	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
++					attrs[MACSEC_ATTR_OFFLOAD],
++					macsec_genl_offload_policy, NULL))
++		return -EINVAL;
++
++	dev = get_dev_from_nl(genl_info_net(info), attrs);
++	if (IS_ERR(dev))
++		return PTR_ERR(dev);
++	macsec = macsec_priv(dev);
++
++	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
++	if (macsec->offload == offload)
++		return 0;
++
++	/* Check if the offloading mode is supported by the underlying layers */
++	if (offload != MACSEC_OFFLOAD_OFF &&
++	    !macsec_check_offload(offload, macsec))
++		return -EOPNOTSUPP;
++
++	if (offload == MACSEC_OFFLOAD_OFF)
++		goto skip_limitation;
++
++	/* Check the physical interface isn't offloading another interface
++	 * first.
++	 */
++	for_each_net(loop_net) {
++		for_each_netdev(loop_net, loop_dev) {
++			struct macsec_dev *priv;
++
++			if (!netif_is_macsec(loop_dev))
++				continue;
++
++			priv = macsec_priv(loop_dev);
++
++			if (priv->real_dev == macsec->real_dev &&
++			    priv->offload != MACSEC_OFFLOAD_OFF)
++				return -EBUSY;
++		}
++	}
++
++skip_limitation:
++	/* Check if the net device is busy. */
++	if (netif_running(dev))
++		return -EBUSY;
++
++	rtnl_lock();
++
++	prev_offload = macsec->offload;
++	macsec->offload = offload;
++
++	/* Check if the device already has rules configured: we do not support
++	 * rules migration.
++	 */
++	if (macsec_is_configured(macsec)) {
++		ret = -EBUSY;
++		goto rollback;
++	}
++
++	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
++			       macsec, &ctx);
++	if (!ops) {
++		ret = -EOPNOTSUPP;
++		goto rollback;
++	}
++
++	if (prev_offload == MACSEC_OFFLOAD_OFF)
++		func = ops->mdo_add_secy;
++	else
++		func = ops->mdo_del_secy;
++
++	ctx.secy = &macsec->secy;
++	ret = macsec_offload(func, &ctx);
++	if (ret)
++		goto rollback;
++
++	rtnl_unlock();
++	return 0;
++
++rollback:
++	macsec->offload = prev_offload;
++
++	rtnl_unlock();
++	return ret;
++}
++
+ static int copy_tx_sa_stats(struct sk_buff *skb,
+ 			    struct macsec_tx_sa_stats __percpu *pstats)
+ {
+@@ -2590,12 +2715,13 @@ static noinline_for_stack int
+ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 	  struct sk_buff *skb, struct netlink_callback *cb)
+ {
+-	struct macsec_rx_sc *rx_sc;
++	struct macsec_dev *macsec = netdev_priv(dev);
+ 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ 	struct nlattr *txsa_list, *rxsc_list;
+-	int i, j;
+-	void *hdr;
++	struct macsec_rx_sc *rx_sc;
+ 	struct nlattr *attr;
++	void *hdr;
++	int i, j;
+ 
+ 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ 			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
+@@ -2607,6 +2733,13 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
+ 		goto nla_put_failure;
+ 
++	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
++	if (!attr)
++		goto nla_put_failure;
++	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
++		goto nla_put_failure;
++	nla_nest_end(skb, attr);
++
+ 	if (nla_put_secy(secy, skb))
+ 		goto nla_put_failure;
+ 
+@@ -2872,6 +3005,12 @@ static const struct genl_ops macsec_genl_ops[] = {
+ 		.doit = macsec_upd_rxsa,
+ 		.flags = GENL_ADMIN_PERM,
+ 	},
++	{
++		.cmd = MACSEC_CMD_UPD_OFFLOAD,
++		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
++		.doit = macsec_upd_offload,
++		.flags = GENL_ADMIN_PERM,
++	},
+ };
+ 
+ static struct genl_family macsec_fam __ro_after_init = {
+diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
+index 98e4d5d7c45ca..1d63c43c38cca 100644
+--- a/include/uapi/linux/if_macsec.h
++++ b/include/uapi/linux/if_macsec.h
+@@ -45,6 +45,7 @@ enum macsec_attrs {
+ 	MACSEC_ATTR_RXSC_LIST,   /* dump, nested, macsec_rxsc_attrs for each RXSC */
+ 	MACSEC_ATTR_TXSC_STATS,  /* dump, nested, macsec_txsc_stats_attr */
+ 	MACSEC_ATTR_SECY_STATS,  /* dump, nested, macsec_secy_stats_attr */
++	MACSEC_ATTR_OFFLOAD,     /* config, nested, macsec_offload_attrs */
+ 	__MACSEC_ATTR_END,
+ 	NUM_MACSEC_ATTR = __MACSEC_ATTR_END,
+ 	MACSEC_ATTR_MAX = __MACSEC_ATTR_END - 1,
+@@ -97,6 +98,15 @@ enum macsec_sa_attrs {
+ 	MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
+ };
+ 
++enum macsec_offload_attrs {
++	MACSEC_OFFLOAD_ATTR_UNSPEC,
++	MACSEC_OFFLOAD_ATTR_TYPE, /* config/dump, u8 0..2 */
++	MACSEC_OFFLOAD_ATTR_PAD,
++	__MACSEC_OFFLOAD_ATTR_END,
++	NUM_MACSEC_OFFLOAD_ATTR = __MACSEC_OFFLOAD_ATTR_END,
++	MACSEC_OFFLOAD_ATTR_MAX = __MACSEC_OFFLOAD_ATTR_END - 1,
++};
++
+ enum macsec_nl_commands {
+ 	MACSEC_CMD_GET_TXSC,
+ 	MACSEC_CMD_ADD_RXSC,
+@@ -108,6 +118,7 @@ enum macsec_nl_commands {
+ 	MACSEC_CMD_ADD_RXSA,
+ 	MACSEC_CMD_DEL_RXSA,
+ 	MACSEC_CMD_UPD_RXSA,
++	MACSEC_CMD_UPD_OFFLOAD,
+ };
+ 
+ /* u64 per-RXSC stats */
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1757-07-v5.18-net-phy-mscc-macsec-initialization.patch b/target/linux/mediatek/patches-5.4/999-1757-07-v5.18-net-phy-mscc-macsec-initialization.patch
new file mode 100644
index 0000000..869e7b6
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1757-07-v5.18-net-phy-mscc-macsec-initialization.patch
@@ -0,0 +1,953 @@
+From 1bbe0ecc2a1a008bcfeb7fd2d8f95c8e9a1867c6 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:45 +0100
+Subject: net: phy: mscc: macsec initialization
+
+This patch adds support for initializing the MACsec engine found within
+some Microsemi PHYs. The engine is initialized in a passthrough mode and
+does not modify any incoming or outgoing packet. But thanks to this it
+now can be configured to perform MACsec transformations on packets,
+which will be supported by a future patch.
+
+The MACsec read and write functions are wrapped into two versions: one
+called during the init phase, and the other one later on. This is
+because the init functions in the Microsemi PHY driver are called while
+the MDIO bus lock is taken.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/phy/mscc.c           | 382 +++++++++++++++++++++++++++++++++++++++
+ drivers/net/phy/mscc_fc_buffer.h |  64 +++++++
+ drivers/net/phy/mscc_mac.h       | 159 ++++++++++++++++
+ drivers/net/phy/mscc_macsec.h    | 260 ++++++++++++++++++++++++++
+ 4 files changed, 865 insertions(+)
+ create mode 100644 drivers/net/phy/mscc_fc_buffer.h
+ create mode 100644 drivers/net/phy/mscc_mac.h
+ create mode 100644 drivers/net/phy/mscc_macsec.h
+
+diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
+index 50214c081164f..8579a59a1336a 100644
+--- a/drivers/net/phy/mscc.c
++++ b/drivers/net/phy/mscc.c
+@@ -18,6 +18,10 @@
+ #include <linux/netdevice.h>
+ #include <dt-bindings/net/mscc-phy-vsc8531.h>
+ 
++#include "mscc_macsec.h"
++#include "mscc_mac.h"
++#include "mscc_fc_buffer.h"
++
+ enum rgmii_rx_clock_delay {
+ 	RGMII_RX_CLK_DELAY_0_2_NS = 0,
+ 	RGMII_RX_CLK_DELAY_0_8_NS = 1,
+@@ -121,6 +125,26 @@ enum rgmii_rx_clock_delay {
+ #define PHY_S6G_PLL_FSM_CTRL_DATA_POS	  8
+ #define PHY_S6G_PLL_FSM_ENA_POS		  7
+ 
++#define MSCC_EXT_PAGE_MACSEC_17		  17
++#define MSCC_EXT_PAGE_MACSEC_18		  18
++
++#define MSCC_EXT_PAGE_MACSEC_19		  19
++#define MSCC_PHY_MACSEC_19_REG_ADDR(x)	  (x)
++#define MSCC_PHY_MACSEC_19_TARGET(x)	  ((x) << 12)
++#define MSCC_PHY_MACSEC_19_READ		  BIT(14)
++#define MSCC_PHY_MACSEC_19_CMD		  BIT(15)
++
++#define MSCC_EXT_PAGE_MACSEC_20		  20
++#define MSCC_PHY_MACSEC_20_TARGET(x)	  (x)
++enum macsec_bank {
++	FC_BUFFER   = 0x04,
++	HOST_MAC    = 0x05,
++	LINE_MAC    = 0x06,
++	IP_1588     = 0x0e,
++	MACSEC_INGR = 0x38,
++	MACSEC_EGR  = 0x3c,
++};
++
+ #define MSCC_EXT_PAGE_ACCESS		  31
+ #define MSCC_PHY_PAGE_STANDARD		  0x0000 /* Standard registers */
+ #define MSCC_PHY_PAGE_EXTENDED		  0x0001 /* Extended registers */
+@@ -128,6 +152,7 @@ enum rgmii_rx_clock_delay {
+ #define MSCC_PHY_PAGE_EXTENDED_3	  0x0003 /* Extended reg - page 3 */
+ #define MSCC_PHY_PAGE_EXTENDED_4	  0x0004 /* Extended reg - page 4 */
+ #define MSCC_PHY_PAGE_CSR_CNTL		  MSCC_PHY_PAGE_EXTENDED_4
++#define MSCC_PHY_PAGE_MACSEC		  MSCC_PHY_PAGE_EXTENDED_4
+ /* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
+  * in the same package.
+  */
+@@ -1584,6 +1609,350 @@ out:
+ 	return ret;
+ }
+ 
++#if IS_ENABLED(CONFIG_MACSEC)
++static u32 vsc8584_macsec_phy_read(struct phy_device *phydev,
++				   enum macsec_bank bank, u32 reg)
++{
++	u32 val, val_l = 0, val_h = 0;
++	unsigned long deadline;
++	int rc;
++
++	rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
++	if (rc < 0)
++		goto failed;
++
++	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
++		    MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
++
++	if (bank >> 2 == 0x1)
++		/* non-MACsec access */
++		bank &= 0x3;
++	else
++		bank = 0;
++
++	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
++		    MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_READ |
++		    MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
++		    MSCC_PHY_MACSEC_19_TARGET(bank));
++
++	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++	do {
++		val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
++	} while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
++
++	val_l = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_17);
++	val_h = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_18);
++
++failed:
++	phy_restore_page(phydev, rc, rc);
++
++	return (val_h << 16) | val_l;
++}
++
++static void vsc8584_macsec_phy_write(struct phy_device *phydev,
++				     enum macsec_bank bank, u32 reg, u32 val)
++{
++	unsigned long deadline;
++	int rc;
++
++	rc = phy_select_page(phydev, MSCC_PHY_PAGE_MACSEC);
++	if (rc < 0)
++		goto failed;
++
++	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_20,
++		    MSCC_PHY_MACSEC_20_TARGET(bank >> 2));
++
++	if ((bank >> 2 == 0x1) || (bank >> 2 == 0x3))
++		bank &= 0x3;
++	else
++		/* MACsec access */
++		bank = 0;
++
++	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_17, (u16)val);
++	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_18, (u16)(val >> 16));
++
++	__phy_write(phydev, MSCC_EXT_PAGE_MACSEC_19,
++		    MSCC_PHY_MACSEC_19_CMD | MSCC_PHY_MACSEC_19_REG_ADDR(reg) |
++		    MSCC_PHY_MACSEC_19_TARGET(bank));
++
++	deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
++	do {
++		val = __phy_read(phydev, MSCC_EXT_PAGE_MACSEC_19);
++	} while (time_before(jiffies, deadline) && !(val & MSCC_PHY_MACSEC_19_CMD));
++
++failed:
++	phy_restore_page(phydev, rc, rc);
++}
++
++static void vsc8584_macsec_classification(struct phy_device *phydev,
++					  enum macsec_bank bank)
++{
++	/* enable VLAN tag parsing */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_CP_TAG,
++				 MSCC_MS_SAM_CP_TAG_PARSE_STAG |
++				 MSCC_MS_SAM_CP_TAG_PARSE_QTAG |
++				 MSCC_MS_SAM_CP_TAG_PARSE_QINQ);
++}
++
++static void vsc8584_macsec_flow_default_action(struct phy_device *phydev,
++					       enum macsec_bank bank,
++					       bool block)
++{
++	u32 port = (bank == MACSEC_INGR) ?
++		    MSCC_MS_PORT_UNCONTROLLED : MSCC_MS_PORT_COMMON;
++	u32 action = MSCC_MS_FLOW_BYPASS;
++
++	if (block)
++		action = MSCC_MS_FLOW_DROP;
++
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_NCP,
++				 /* MACsec untagged */
++				 MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
++				 MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
++				 MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(port) |
++				 /* MACsec tagged */
++				 MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
++				 MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
++				 MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(port) |
++				 /* Bad tag */
++				 MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
++				 MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
++				 MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(port) |
++				 /* Kay tag */
++				 MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
++				 MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
++				 MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(port));
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_NM_FLOW_CP,
++				 /* MACsec untagged */
++				 MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(action) |
++				 MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
++				 MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(port) |
++				 /* MACsec tagged */
++				 MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(action) |
++				 MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(MSCC_MS_ACTION_DROP) |
++				 MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(port) |
++				 /* Bad tag */
++				 MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(action) |
++				 MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(MSCC_MS_ACTION_DROP) |
++				 MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(port) |
++				 /* Kay tag */
++				 MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(action) |
++				 MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(MSCC_MS_ACTION_DROP) |
++				 MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(port));
++}
++
++static void vsc8584_macsec_integrity_checks(struct phy_device *phydev,
++					    enum macsec_bank bank)
++{
++	u32 val;
++
++	if (bank != MACSEC_INGR)
++		return;
++
++	/* Set default rules to pass unmatched frames */
++	val = vsc8584_macsec_phy_read(phydev, bank,
++				      MSCC_MS_PARAMS2_IG_CC_CONTROL);
++	val |= MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT |
++	       MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT;
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CC_CONTROL,
++				 val);
++
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PARAMS2_IG_CP_TAG,
++				 MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG |
++				 MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG |
++				 MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ);
++}
++
++static void vsc8584_macsec_block_init(struct phy_device *phydev,
++				      enum macsec_bank bank)
++{
++	u32 val;
++	int i;
++
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
++				 MSCC_MS_ENA_CFG_SW_RST |
++				 MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA);
++
++	/* Set the MACsec block out of s/w reset and enable clocks */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
++				 MSCC_MS_ENA_CFG_CLK_ENA);
++
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_STATUS_CONTEXT_CTRL,
++				 bank == MACSEC_INGR ? 0xe5880214 : 0xe5880218);
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_MISC_CONTROL,
++				 MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(bank == MACSEC_INGR ? 57 : 40) |
++				 MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(bank == MACSEC_INGR ? 1 : 2));
++
++	/* Clear the counters */
++	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
++	val |= MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET;
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
++
++	/* Enable octet increment mode */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_PP_CTRL,
++				 MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE);
++
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_BLOCK_CTX_UPDATE, 0x3);
++
++	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_COUNT_CONTROL);
++	val |= MSCC_MS_COUNT_CONTROL_RESET_ALL;
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_COUNT_CONTROL, val);
++
++	/* Set the MTU */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_NON_VLAN_MTU_CHECK,
++				 MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(32761) |
++				 MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP);
++
++	for (i = 0; i < 8; i++)
++		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_VLAN_MTU_CHECK(i),
++					 MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(32761) |
++					 MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP);
++
++	if (bank == MACSEC_EGR) {
++		val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_INTR_CTRL_STATUS);
++		val &= ~MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M;
++		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_INTR_CTRL_STATUS, val);
++
++		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_FC_CFG,
++					 MSCC_MS_FC_CFG_FCBUF_ENA |
++					 MSCC_MS_FC_CFG_LOW_THRESH(0x1) |
++					 MSCC_MS_FC_CFG_HIGH_THRESH(0x4) |
++					 MSCC_MS_FC_CFG_LOW_BYTES_VAL(0x4) |
++					 MSCC_MS_FC_CFG_HIGH_BYTES_VAL(0x6));
++	}
++
++	vsc8584_macsec_classification(phydev, bank);
++	vsc8584_macsec_flow_default_action(phydev, bank, false);
++	vsc8584_macsec_integrity_checks(phydev, bank);
++
++	/* Enable the MACsec block */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_ENA_CFG,
++				 MSCC_MS_ENA_CFG_CLK_ENA |
++				 MSCC_MS_ENA_CFG_MACSEC_ENA |
++				 MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(0x5));
++}
++
++static void vsc8584_macsec_mac_init(struct phy_device *phydev,
++				    enum macsec_bank bank)
++{
++	u32 val;
++	int i;
++
++	/* Clear host & line stats */
++	for (i = 0; i < 36; i++)
++		vsc8584_macsec_phy_write(phydev, bank, 0x1c + i, 0);
++
++	val = vsc8584_macsec_phy_read(phydev, bank,
++				      MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL);
++	val &= ~MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M;
++	val |= MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(2) |
++	       MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(0xffff);
++	vsc8584_macsec_phy_write(phydev, bank,
++				 MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL, val);
++
++	val = vsc8584_macsec_phy_read(phydev, bank,
++				      MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2);
++	val |= 0xffff;
++	vsc8584_macsec_phy_write(phydev, bank,
++				 MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2, val);
++
++	val = vsc8584_macsec_phy_read(phydev, bank,
++				      MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL);
++	if (bank == HOST_MAC)
++		val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA |
++		       MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA;
++	else
++		val |= MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA |
++		       MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA |
++		       MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE |
++		       MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA;
++	vsc8584_macsec_phy_write(phydev, bank,
++				 MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL, val);
++
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_PKTINF_CFG,
++				 MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA |
++				 MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA |
++				 MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA |
++				 MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
++				 MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
++				 (bank == HOST_MAC ?
++				  MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0));
++
++	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
++	val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MODE_CFG, val);
++
++	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG);
++	val &= ~MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M;
++	val |= MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(10240);
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_MAXLEN_CFG, val);
++
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ADV_CHK_CFG,
++				 MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA |
++				 MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA |
++				 MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA |
++				 MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA);
++
++	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_LFS_CFG);
++	val &= ~MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA;
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_LFS_CFG, val);
++
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MAC_CFG_ENA_CFG,
++				 MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA |
++				 MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA |
++				 MSCC_MAC_CFG_ENA_CFG_RX_ENA |
++				 MSCC_MAC_CFG_ENA_CFG_TX_ENA);
++}
++
++/* Must be called with mdio_lock taken */
++static int vsc8584_macsec_init(struct phy_device *phydev)
++{
++	u32 val;
++
++	vsc8584_macsec_block_init(phydev, MACSEC_INGR);
++	vsc8584_macsec_block_init(phydev, MACSEC_EGR);
++	vsc8584_macsec_mac_init(phydev, HOST_MAC);
++	vsc8584_macsec_mac_init(phydev, LINE_MAC);
++
++	vsc8584_macsec_phy_write(phydev, FC_BUFFER,
++				 MSCC_FCBUF_FC_READ_THRESH_CFG,
++				 MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(4) |
++				 MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(5));
++
++	val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG);
++	val |= MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA |
++	       MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA |
++	       MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA;
++	vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_MODE_CFG, val);
++
++	vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG,
++				 MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(8) |
++				 MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(9));
++
++	val = vsc8584_macsec_phy_read(phydev, FC_BUFFER,
++				      MSCC_FCBUF_TX_DATA_QUEUE_CFG);
++	val &= ~(MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M |
++		 MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M);
++	val |= MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(0) |
++		MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(5119);
++	vsc8584_macsec_phy_write(phydev, FC_BUFFER,
++				 MSCC_FCBUF_TX_DATA_QUEUE_CFG, val);
++
++	val = vsc8584_macsec_phy_read(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG);
++	val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA;
++	vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val);
++
++	val = vsc8584_macsec_phy_read(phydev, IP_1588,
++				      MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL);
++	val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M;
++	val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4);
++	vsc8584_macsec_phy_write(phydev, IP_1588,
++				 MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val);
++
++	return 0;
++}
++#endif /* CONFIG_MACSEC */
++
+ /* Check if one PHY has already done the init of the parts common to all PHYs
+  * in the Quad PHY package.
+  */
+@@ -1733,6 +2102,19 @@ static int vsc8584_config_init(struct phy_device *phydev)
+ 
+ 	mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ 
++#if IS_ENABLED(CONFIG_MACSEC)
++	/* MACsec */
++	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
++	case PHY_ID_VSC856X:
++	case PHY_ID_VSC8575:
++	case PHY_ID_VSC8582:
++	case PHY_ID_VSC8584:
++		ret = vsc8584_macsec_init(phydev);
++		if (ret)
++			goto err;
++	}
++#endif
++
+ 	phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+ 
+ 	val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
+diff --git a/drivers/net/phy/mscc_fc_buffer.h b/drivers/net/phy/mscc_fc_buffer.h
+new file mode 100644
+index 0000000000000..7e9c0e8778952
+--- /dev/null
++++ b/drivers/net/phy/mscc_fc_buffer.h
+@@ -0,0 +1,64 @@
++/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
++/*
++ * Microsemi Ocelot Switch driver
++ *
++ * Copyright (C) 2019 Microsemi Corporation
++ */
++
++#ifndef _MSCC_OCELOT_FC_BUFFER_H_
++#define _MSCC_OCELOT_FC_BUFFER_H_
++
++#define MSCC_FCBUF_ENA_CFG					0x00
++#define MSCC_FCBUF_MODE_CFG					0x01
++#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG			0x02
++#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG				0x03
++#define MSCC_FCBUF_TX_DATA_QUEUE_CFG				0x04
++#define MSCC_FCBUF_RX_DATA_QUEUE_CFG				0x05
++#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG			0x06
++#define MSCC_FCBUF_FC_READ_THRESH_CFG				0x07
++#define MSCC_FCBUF_TX_FRM_GAP_COMP				0x08
++
++#define MSCC_FCBUF_ENA_CFG_TX_ENA				BIT(0)
++#define MSCC_FCBUF_ENA_CFG_RX_ENA				BIT(4)
++
++#define MSCC_FCBUF_MODE_CFG_DROP_BEHAVIOUR			BIT(4)
++#define MSCC_FCBUF_MODE_CFG_PAUSE_REACT_ENA			BIT(8)
++#define MSCC_FCBUF_MODE_CFG_RX_PPM_RATE_ADAPT_ENA		BIT(12)
++#define MSCC_FCBUF_MODE_CFG_TX_PPM_RATE_ADAPT_ENA		BIT(16)
++#define MSCC_FCBUF_MODE_CFG_TX_CTRL_QUEUE_ENA			BIT(20)
++#define MSCC_FCBUF_MODE_CFG_PAUSE_GEN_ENA			BIT(24)
++#define MSCC_FCBUF_MODE_CFG_INCLUDE_PAUSE_RCVD_IN_PAUSE_GEN	BIT(28)
++
++#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH(x)	(x)
++#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_THRESH_M	GENMASK(15, 0)
++#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET(x)	((x) << 16)
++#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_TX_OFFSET_M	GENMASK(19, 16)
++#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_RX_THRESH(x)	((x) << 20)
++#define MSCC_FCBUF_PPM_RATE_ADAPT_THRESH_CFG_RX_THRESH_M	GENMASK(31, 20)
++
++#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_START(x)			(x)
++#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_START_M			GENMASK(15, 0)
++#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_END(x)			((x) << 16)
++#define MSCC_FCBUF_TX_CTRL_QUEUE_CFG_END_M			GENMASK(31, 16)
++
++#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_START(x)			(x)
++#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_START_M			GENMASK(15, 0)
++#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_END(x)			((x) << 16)
++#define MSCC_FCBUF_TX_DATA_QUEUE_CFG_END_M			GENMASK(31, 16)
++
++#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_START(x)			(x)
++#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_START_M			GENMASK(15, 0)
++#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_END(x)			((x) << 16)
++#define MSCC_FCBUF_RX_DATA_QUEUE_CFG_END_M			GENMASK(31, 16)
++
++#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XOFF_THRESH(x)	(x)
++#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XOFF_THRESH_M	GENMASK(15, 0)
++#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XON_THRESH(x)	((x) << 16)
++#define MSCC_FCBUF_TX_BUFF_XON_XOFF_THRESH_CFG_XON_THRESH_M	GENMASK(31, 16)
++
++#define MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH(x)		(x)
++#define MSCC_FCBUF_FC_READ_THRESH_CFG_TX_THRESH_M		GENMASK(15, 0)
++#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH(x)		((x) << 16)
++#define MSCC_FCBUF_FC_READ_THRESH_CFG_RX_THRESH_M		GENMASK(31, 16)
++
++#endif
+diff --git a/drivers/net/phy/mscc_mac.h b/drivers/net/phy/mscc_mac.h
+new file mode 100644
+index 0000000000000..9420ee5175a61
+--- /dev/null
++++ b/drivers/net/phy/mscc_mac.h
+@@ -0,0 +1,159 @@
++/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
++/*
++ * Microsemi Ocelot Switch driver
++ *
++ * Copyright (c) 2017 Microsemi Corporation
++ */
++
++#ifndef _MSCC_OCELOT_LINE_MAC_H_
++#define _MSCC_OCELOT_LINE_MAC_H_
++
++#define MSCC_MAC_CFG_ENA_CFG					0x00
++#define MSCC_MAC_CFG_MODE_CFG					0x01
++#define MSCC_MAC_CFG_MAXLEN_CFG					0x02
++#define MSCC_MAC_CFG_NUM_TAGS_CFG				0x03
++#define MSCC_MAC_CFG_TAGS_CFG					0x04
++#define MSCC_MAC_CFG_ADV_CHK_CFG				0x07
++#define MSCC_MAC_CFG_LFS_CFG					0x08
++#define MSCC_MAC_CFG_LB_CFG					0x09
++#define MSCC_MAC_CFG_PKTINF_CFG					0x0a
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL			0x0b
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_2			0x0c
++#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL			0x0d
++#define MSCC_MAC_PAUSE_CFG_STATE				0x0e
++#define MSCC_MAC_PAUSE_CFG_MAC_ADDRESS_LSB			0x0f
++#define MSCC_MAC_PAUSE_CFG_MAC_ADDRESS_MSB			0x10
++#define MSCC_MAC_STATUS_RX_LANE_STICKY_0			0x11
++#define MSCC_MAC_STATUS_RX_LANE_STICKY_1			0x12
++#define MSCC_MAC_STATUS_TX_MONITOR_STICKY			0x13
++#define MSCC_MAC_STATUS_TX_MONITOR_STICKY_MASK			0x14
++#define MSCC_MAC_STATUS_STICKY					0x15
++#define MSCC_MAC_STATUS_STICKY_MASK				0x16
++#define MSCC_MAC_STATS_32BIT_RX_HIH_CKSM_ERR_CNT		0x17
++#define MSCC_MAC_STATS_32BIT_RX_XGMII_PROT_ERR_CNT		0x18
++#define MSCC_MAC_STATS_32BIT_RX_SYMBOL_ERR_CNT			0x19
++#define MSCC_MAC_STATS_32BIT_RX_PAUSE_CNT			0x1a
++#define MSCC_MAC_STATS_32BIT_RX_UNSUP_OPCODE_CNT		0x1b
++#define MSCC_MAC_STATS_32BIT_RX_UC_CNT				0x1c
++#define MSCC_MAC_STATS_32BIT_RX_MC_CNT				0x1d
++#define MSCC_MAC_STATS_32BIT_RX_BC_CNT				0x1e
++#define MSCC_MAC_STATS_32BIT_RX_CRC_ERR_CNT			0x1f
++#define MSCC_MAC_STATS_32BIT_RX_UNDERSIZE_CNT			0x20
++#define MSCC_MAC_STATS_32BIT_RX_FRAGMENTS_CNT			0x21
++#define MSCC_MAC_STATS_32BIT_RX_IN_RANGE_LEN_ERR_CNT		0x22
++#define MSCC_MAC_STATS_32BIT_RX_OUT_OF_RANGE_LEN_ERR_CNT	0x23
++#define MSCC_MAC_STATS_32BIT_RX_OVERSIZE_CNT			0x24
++#define MSCC_MAC_STATS_32BIT_RX_JABBERS_CNT			0x25
++#define MSCC_MAC_STATS_32BIT_RX_SIZE64_CNT			0x26
++#define MSCC_MAC_STATS_32BIT_RX_SIZE65TO127_CNT			0x27
++#define MSCC_MAC_STATS_32BIT_RX_SIZE128TO255_CNT		0x28
++#define MSCC_MAC_STATS_32BIT_RX_SIZE256TO511_CNT		0x29
++#define MSCC_MAC_STATS_32BIT_RX_SIZE512TO1023_CNT		0x2a
++#define MSCC_MAC_STATS_32BIT_RX_SIZE1024TO1518_CNT		0x2b
++#define MSCC_MAC_STATS_32BIT_RX_SIZE1519TOMAX_CNT		0x2c
++#define MSCC_MAC_STATS_32BIT_RX_IPG_SHRINK_CNT			0x2d
++#define MSCC_MAC_STATS_32BIT_TX_PAUSE_CNT			0x2e
++#define MSCC_MAC_STATS_32BIT_TX_UC_CNT				0x2f
++#define MSCC_MAC_STATS_32BIT_TX_MC_CNT				0x30
++#define MSCC_MAC_STATS_32BIT_TX_BC_CNT				0x31
++#define MSCC_MAC_STATS_32BIT_TX_SIZE64_CNT			0x32
++#define MSCC_MAC_STATS_32BIT_TX_SIZE65TO127_CNT			0x33
++#define MSCC_MAC_STATS_32BIT_TX_SIZE128TO255_CNT		0x34
++#define MSCC_MAC_STATS_32BIT_TX_SIZE256TO511_CNT		0x35
++#define MSCC_MAC_STATS_32BIT_TX_SIZE512TO1023_CNT		0x36
++#define MSCC_MAC_STATS_32BIT_TX_SIZE1024TO1518_CNT		0x37
++#define MSCC_MAC_STATS_32BIT_TX_SIZE1519TOMAX_CNT		0x38
++#define MSCC_MAC_STATS_40BIT_RX_BAD_BYTES_CNT			0x39
++#define MSCC_MAC_STATS_40BIT_RX_BAD_BYTES_MSB_CNT		0x3a
++#define MSCC_MAC_STATS_40BIT_RX_OK_BYTES_CNT			0x3b
++#define MSCC_MAC_STATS_40BIT_RX_OK_BYTES_MSB_CNT		0x3c
++#define MSCC_MAC_STATS_40BIT_RX_IN_BYTES_CNT			0x3d
++#define MSCC_MAC_STATS_40BIT_RX_IN_BYTES_MSB_CNT		0x3e
++#define MSCC_MAC_STATS_40BIT_TX_OK_BYTES_CNT			0x3f
++#define MSCC_MAC_STATS_40BIT_TX_OK_BYTES_MSB_CNT		0x40
++#define MSCC_MAC_STATS_40BIT_TX_OUT_BYTES_CNT			0x41
++#define MSCC_MAC_STATS_40BIT_TX_OUT_BYTES_MSB_CNT		0x42
++
++#define MSCC_MAC_CFG_ENA_CFG_RX_CLK_ENA				BIT(0)
++#define MSCC_MAC_CFG_ENA_CFG_TX_CLK_ENA				BIT(4)
++#define MSCC_MAC_CFG_ENA_CFG_RX_SW_RST				BIT(8)
++#define MSCC_MAC_CFG_ENA_CFG_TX_SW_RST				BIT(12)
++#define MSCC_MAC_CFG_ENA_CFG_RX_ENA				BIT(16)
++#define MSCC_MAC_CFG_ENA_CFG_TX_ENA				BIT(20)
++
++#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE_INTERVAL(x)	((x) << 20)
++#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE_INTERVAL_M	GENMASK(29, 20)
++#define MSCC_MAC_CFG_MODE_CFG_FORCE_CW_UPDATE			BIT(16)
++#define MSCC_MAC_CFG_MODE_CFG_TUNNEL_PAUSE_FRAMES		BIT(14)
++#define MSCC_MAC_CFG_MODE_CFG_MAC_PREAMBLE_CFG(x)		((x) << 10)
++#define MSCC_MAC_CFG_MODE_CFG_MAC_PREAMBLE_CFG_M		GENMASK(12, 10)
++#define MSCC_MAC_CFG_MODE_CFG_MAC_IPG_CFG			BIT(6)
++#define MSCC_MAC_CFG_MODE_CFG_XGMII_GEN_MODE_ENA		BIT(4)
++#define MSCC_MAC_CFG_MODE_CFG_HIH_CRC_CHECK			BIT(2)
++#define MSCC_MAC_CFG_MODE_CFG_UNDERSIZED_FRAME_DROP_DIS		BIT(1)
++#define MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC			BIT(0)
++
++#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_TAG_CHK			BIT(16)
++#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN(x)			(x)
++#define MSCC_MAC_CFG_MAXLEN_CFG_MAX_LEN_M			GENMASK(15, 0)
++
++#define MSCC_MAC_CFG_TAGS_CFG_RSZ				0x4
++#define MSCC_MAC_CFG_TAGS_CFG_TAG_ID(x)				((x) << 16)
++#define MSCC_MAC_CFG_TAGS_CFG_TAG_ID_M				GENMASK(31, 16)
++#define MSCC_MAC_CFG_TAGS_CFG_TAG_ENA				BIT(4)
++
++#define MSCC_MAC_CFG_ADV_CHK_CFG_EXT_EOP_CHK_ENA		BIT(24)
++#define MSCC_MAC_CFG_ADV_CHK_CFG_EXT_SOP_CHK_ENA		BIT(20)
++#define MSCC_MAC_CFG_ADV_CHK_CFG_SFD_CHK_ENA			BIT(16)
++#define MSCC_MAC_CFG_ADV_CHK_CFG_PRM_SHK_CHK_DIS		BIT(12)
++#define MSCC_MAC_CFG_ADV_CHK_CFG_PRM_CHK_ENA			BIT(8)
++#define MSCC_MAC_CFG_ADV_CHK_CFG_OOR_ERR_ENA			BIT(4)
++#define MSCC_MAC_CFG_ADV_CHK_CFG_INR_ERR_ENA			BIT(0)
++
++#define MSCC_MAC_CFG_LFS_CFG_LFS_INH_TX				BIT(8)
++#define MSCC_MAC_CFG_LFS_CFG_LFS_DIS_TX				BIT(4)
++#define MSCC_MAC_CFG_LFS_CFG_LFS_UNIDIR_ENA			BIT(3)
++#define MSCC_MAC_CFG_LFS_CFG_USE_LEADING_EDGE_DETECT		BIT(2)
++#define MSCC_MAC_CFG_LFS_CFG_SPURIOUS_Q_DIS			BIT(1)
++#define MSCC_MAC_CFG_LFS_CFG_LFS_MODE_ENA			BIT(0)
++
++#define MSCC_MAC_CFG_LB_CFG_XGMII_HOST_LB_ENA			BIT(4)
++#define MSCC_MAC_CFG_LB_CFG_XGMII_PHY_LB_ENA			BIT(0)
++
++#define MSCC_MAC_CFG_PKTINF_CFG_STRIP_FCS_ENA			BIT(0)
++#define MSCC_MAC_CFG_PKTINF_CFG_INSERT_FCS_ENA			BIT(4)
++#define MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA		BIT(8)
++#define MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA		BIT(12)
++#define MSCC_MAC_CFG_PKTINF_CFG_LPI_RELAY_ENA			BIT(16)
++#define MSCC_MAC_CFG_PKTINF_CFG_LF_RELAY_ENA			BIT(20)
++#define MSCC_MAC_CFG_PKTINF_CFG_RF_RELAY_ENA			BIT(24)
++#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING		BIT(25)
++#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_RX_PADDING		BIT(26)
++#define MSCC_MAC_CFG_PKTINF_CFG_ENABLE_4BYTE_PREAMBLE		BIT(27)
++#define MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS(x)	((x) << 28)
++#define MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS_M	GENMASK(30, 28)
++
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE(x)		((x) << 16)
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_VALUE_M		GENMASK(31, 16)
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_WAIT_FOR_LPI_LOW	BIT(12)
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_USE_PAUSE_STALL_ENA	BIT(8)
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_REPL_MODE	BIT(4)
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_FRC_FRAME	BIT(2)
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE(x)		(x)
++#define MSCC_MAC_PAUSE_CFG_TX_FRAME_CTRL_PAUSE_MODE_M		GENMASK(1, 0)
++
++#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_EARLY_PAUSE_DETECT_ENA	BIT(16)
++#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PRE_CRC_MODE		BIT(20)
++#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_TIMER_ENA	BIT(12)
++#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_REACT_ENA	BIT(8)
++#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_FRAME_DROP_ENA	BIT(4)
++#define MSCC_MAC_PAUSE_CFG_RX_FRAME_CTRL_PAUSE_MODE		BIT(0)
++
++#define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE			BIT(0)
++#define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN		BIT(4)
++
++#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL			0x2
++#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x)	(x)
++#define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M	GENMASK(2, 0)
++
++#endif /* _MSCC_OCELOT_LINE_MAC_H_ */
+diff --git a/drivers/net/phy/mscc_macsec.h b/drivers/net/phy/mscc_macsec.h
+new file mode 100644
+index 0000000000000..0d108da28dad2
+--- /dev/null
++++ b/drivers/net/phy/mscc_macsec.h
+@@ -0,0 +1,260 @@
++/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
++/*
++ * Microsemi Ocelot Switch driver
++ *
++ * Copyright (c) 2018 Microsemi Corporation
++ */
++
++#ifndef _MSCC_OCELOT_MACSEC_H_
++#define _MSCC_OCELOT_MACSEC_H_
++
++#define CONTROL_TYPE_EGRESS		0x6
++#define CONTROL_TYPE_INGRESS		0xf
++#define CONTROL_IV0			BIT(5)
++#define CONTROL_IV1			BIT(6)
++#define CONTROL_IV2			BIT(7)
++#define CONTROL_UPDATE_SEQ		BIT(13)
++#define CONTROL_IV_IN_SEQ		BIT(14)
++#define CONTROL_ENCRYPT_AUTH		BIT(15)
++#define CONTROL_KEY_IN_CTX		BIT(16)
++#define CONTROL_CRYPTO_ALG(x)		((x) << 17)
++#define     CTRYPTO_ALG_AES_CTR_128	0x5
++#define     CTRYPTO_ALG_AES_CTR_192	0x6
++#define     CTRYPTO_ALG_AES_CTR_256	0x7
++#define CONTROL_DIGEST_TYPE(x)		((x) << 21)
++#define CONTROL_AUTH_ALG(x)		((x) << 23)
++#define     AUTH_ALG_AES_GHAS		0x4
++#define CONTROL_AN(x)			((x) << 26)
++#define CONTROL_SEQ_TYPE(x)		((x) << 28)
++#define CONTROL_SEQ_MASK		BIT(30)
++#define CONTROL_CONTEXT_ID		BIT(31)
++
++enum mscc_macsec_destination_ports {
++	MSCC_MS_PORT_COMMON		= 0,
++	MSCC_MS_PORT_RSVD		= 1,
++	MSCC_MS_PORT_CONTROLLED		= 2,
++	MSCC_MS_PORT_UNCONTROLLED	= 3,
++};
++
++enum mscc_macsec_drop_actions {
++	MSCC_MS_ACTION_BYPASS_CRC	= 0,
++	MSCC_MS_ACTION_BYPASS_BAD	= 1,
++	MSCC_MS_ACTION_DROP		= 2,
++	MSCC_MS_ACTION_BYPASS		= 3,
++};
++
++enum mscc_macsec_flow_types {
++	MSCC_MS_FLOW_BYPASS		= 0,
++	MSCC_MS_FLOW_DROP		= 1,
++	MSCC_MS_FLOW_INGRESS		= 2,
++	MSCC_MS_FLOW_EGRESS		= 3,
++};
++
++enum mscc_macsec_validate_levels {
++	MSCC_MS_VALIDATE_DISABLED	= 0,
++	MSCC_MS_VALIDATE_CHECK		= 1,
++	MSCC_MS_VALIDATE_STRICT		= 2,
++};
++
++#define MSCC_MS_XFORM_REC(x, y)		(((x) << 5) + (y))
++#define MSCC_MS_ENA_CFG			0x800
++#define MSCC_MS_FC_CFG			0x804
++#define MSCC_MS_SAM_MISC_MATCH(x)	(0x1004 + ((x) << 4))
++#define MSCC_MS_SAM_MATCH_SCI_LO(x)	(0x1005 + ((x) << 4))
++#define MSCC_MS_SAM_MATCH_SCI_HI(x)	(0x1006 + ((x) << 4))
++#define MSCC_MS_SAM_MASK(x)		(0x1007 + ((x) << 4))
++#define MSCC_MS_SAM_ENTRY_SET1		0x1808
++#define MSCC_MS_SAM_ENTRY_CLEAR1	0x180c
++#define MSCC_MS_SAM_FLOW_CTRL(x)	(0x1c00 + (x))
++#define MSCC_MS_SAM_CP_TAG		0x1e40
++#define MSCC_MS_SAM_NM_FLOW_NCP		0x1e51
++#define MSCC_MS_SAM_NM_FLOW_CP		0x1e52
++#define MSCC_MS_MISC_CONTROL		0x1e5f
++#define MSCC_MS_COUNT_CONTROL		0x3204
++#define MSCC_MS_PARAMS2_IG_CC_CONTROL	0x3a10
++#define MSCC_MS_PARAMS2_IG_CP_TAG	0x3a14
++#define MSCC_MS_VLAN_MTU_CHECK(x)	(0x3c40 + (x))
++#define MSCC_MS_NON_VLAN_MTU_CHECK	0x3c48
++#define MSCC_MS_PP_CTRL			0x3c4b
++#define MSCC_MS_STATUS_CONTEXT_CTRL	0x3d02
++#define MSCC_MS_INTR_CTRL_STATUS	0x3d04
++#define MSCC_MS_BLOCK_CTX_UPDATE	0x3d0c
++
++/* MACSEC_ENA_CFG */
++#define MSCC_MS_ENA_CFG_CLK_ENA				BIT(0)
++#define MSCC_MS_ENA_CFG_SW_RST				BIT(1)
++#define MSCC_MS_ENA_CFG_MACSEC_BYPASS_ENA		BIT(8)
++#define MSCC_MS_ENA_CFG_MACSEC_ENA			BIT(9)
++#define MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE(x)		((x) << 10)
++#define MSCC_MS_ENA_CFG_MACSEC_SPEED_MODE_M		GENMASK(12, 10)
++
++/* MACSEC_FC_CFG */
++#define MSCC_MS_FC_CFG_FCBUF_ENA			BIT(0)
++#define MSCC_MS_FC_CFG_USE_PKT_EXPANSION_INDICATION	BIT(1)
++#define MSCC_MS_FC_CFG_LOW_THRESH(x)			((x) << 4)
++#define MSCC_MS_FC_CFG_LOW_THRESH_M			GENMASK(7, 4)
++#define MSCC_MS_FC_CFG_HIGH_THRESH(x)			((x) << 8)
++#define MSCC_MS_FC_CFG_HIGH_THRESH_M			GENMASK(11, 8)
++#define MSCC_MS_FC_CFG_LOW_BYTES_VAL(x)			((x) << 12)
++#define MSCC_MS_FC_CFG_LOW_BYTES_VAL_M			GENMASK(14, 12)
++#define MSCC_MS_FC_CFG_HIGH_BYTES_VAL(x)		((x) << 16)
++#define MSCC_MS_FC_CFG_HIGH_BYTES_VAL_M			GENMASK(18, 16)
++
++/* MSCC_MS_SAM_MAC_SA_MATCH_HI */
++#define MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(x)		((x) << 16)
++#define MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE_M		GENMASK(31, 16)
++
++/* MACSEC_SAM_MISC_MATCH */
++#define MSCC_MS_SAM_MISC_MATCH_VLAN_VALID		BIT(0)
++#define MSCC_MS_SAM_MISC_MATCH_QINQ_FOUND		BIT(1)
++#define MSCC_MS_SAM_MISC_MATCH_STAG_VALID		BIT(2)
++#define MSCC_MS_SAM_MISC_MATCH_QTAG_VALID		BIT(3)
++#define MSCC_MS_SAM_MISC_MATCH_VLAN_UP(x)		((x) << 4)
++#define MSCC_MS_SAM_MISC_MATCH_VLAN_UP_M		GENMASK(6, 4)
++#define MSCC_MS_SAM_MISC_MATCH_CONTROL_PACKET		BIT(7)
++#define MSCC_MS_SAM_MISC_MATCH_UNTAGGED			BIT(8)
++#define MSCC_MS_SAM_MISC_MATCH_TAGGED			BIT(9)
++#define MSCC_MS_SAM_MISC_MATCH_BAD_TAG			BIT(10)
++#define MSCC_MS_SAM_MISC_MATCH_KAY_TAG			BIT(11)
++#define MSCC_MS_SAM_MISC_MATCH_SOURCE_PORT(x)		((x) << 12)
++#define MSCC_MS_SAM_MISC_MATCH_SOURCE_PORT_M		GENMASK(13, 12)
++#define MSCC_MS_SAM_MISC_MATCH_PRIORITY(x)		((x) << 16)
++#define MSCC_MS_SAM_MISC_MATCH_PRIORITY_M		GENMASK(19, 16)
++#define MSCC_MS_SAM_MISC_MATCH_AN(x)			((x) << 24)
++#define MSCC_MS_SAM_MISC_MATCH_TCI(x)			((x) << 26)
++
++/* MACSEC_SAM_MASK */
++#define MSCC_MS_SAM_MASK_MAC_SA_MASK(x)			(x)
++#define MSCC_MS_SAM_MASK_MAC_SA_MASK_M			GENMASK(5, 0)
++#define MSCC_MS_SAM_MASK_MAC_DA_MASK(x)			((x) << 6)
++#define MSCC_MS_SAM_MASK_MAC_DA_MASK_M			GENMASK(11, 6)
++#define MSCC_MS_SAM_MASK_MAC_ETYPE_MASK			BIT(12)
++#define MSCC_MS_SAM_MASK_VLAN_VLD_MASK			BIT(13)
++#define MSCC_MS_SAM_MASK_QINQ_FOUND_MASK		BIT(14)
++#define MSCC_MS_SAM_MASK_STAG_VLD_MASK			BIT(15)
++#define MSCC_MS_SAM_MASK_QTAG_VLD_MASK			BIT(16)
++#define MSCC_MS_SAM_MASK_VLAN_UP_MASK			BIT(17)
++#define MSCC_MS_SAM_MASK_VLAN_ID_MASK			BIT(18)
++#define MSCC_MS_SAM_MASK_SOURCE_PORT_MASK		BIT(19)
++#define MSCC_MS_SAM_MASK_CTL_PACKET_MASK		BIT(20)
++#define MSCC_MS_SAM_MASK_VLAN_UP_INNER_MASK		BIT(21)
++#define MSCC_MS_SAM_MASK_VLAN_ID_INNER_MASK		BIT(22)
++#define MSCC_MS_SAM_MASK_SCI_MASK			BIT(23)
++#define MSCC_MS_SAM_MASK_AN_MASK(x)			((x) << 24)
++#define MSCC_MS_SAM_MASK_TCI_MASK(x)			((x) << 26)
++
++/* MACSEC_SAM_FLOW_CTRL_EGR */
++#define MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(x)		(x)
++#define MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE_M		GENMASK(1, 0)
++#define MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(x)		((x) << 2)
++#define MSCC_MS_SAM_FLOW_CTRL_DEST_PORT_M		GENMASK(3, 2)
++#define MSCC_MS_SAM_FLOW_CTRL_RESV_4			BIT(4)
++#define MSCC_MS_SAM_FLOW_CTRL_FLOW_CRYPT_AUTH		BIT(5)
++#define MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(x)		((x) << 6)
++#define MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION_M		GENMASK(7, 6)
++#define MSCC_MS_SAM_FLOW_CTRL_RESV_15_TO_8(x)		((x) << 8)
++#define MSCC_MS_SAM_FLOW_CTRL_RESV_15_TO_8_M		GENMASK(15, 8)
++#define MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME		BIT(16)
++#define MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT		BIT(16)
++#define MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE			BIT(17)
++#define MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI		BIT(18)
++#define MSCC_MS_SAM_FLOW_CTRL_USE_ES			BIT(19)
++#define MSCC_MS_SAM_FLOW_CTRL_USE_SCB			BIT(20)
++#define MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(x)	((x) << 19)
++#define MSCC_MS_SAM_FLOW_CTRL_TAG_BYPASS_SIZE(x)	((x) << 21)
++#define MSCC_MS_SAM_FLOW_CTRL_TAG_BYPASS_SIZE_M		GENMASK(22, 21)
++#define MSCC_MS_SAM_FLOW_CTRL_RESV_23			BIT(23)
++#define MSCC_MS_SAM_FLOW_CTRL_CONFIDENTIALITY_OFFSET(x)	((x) << 24)
++#define MSCC_MS_SAM_FLOW_CTRL_CONFIDENTIALITY_OFFSET_M	GENMASK(30, 24)
++#define MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT		BIT(31)
++
++/* MACSEC_SAM_CP_TAG */
++#define MSCC_MS_SAM_CP_TAG_MAP_TBL(x)			(x)
++#define MSCC_MS_SAM_CP_TAG_MAP_TBL_M			GENMASK(23, 0)
++#define MSCC_MS_SAM_CP_TAG_DEF_UP(x)			((x) << 24)
++#define MSCC_MS_SAM_CP_TAG_DEF_UP_M			GENMASK(26, 24)
++#define MSCC_MS_SAM_CP_TAG_STAG_UP_EN			BIT(27)
++#define MSCC_MS_SAM_CP_TAG_QTAG_UP_EN			BIT(28)
++#define MSCC_MS_SAM_CP_TAG_PARSE_QINQ			BIT(29)
++#define MSCC_MS_SAM_CP_TAG_PARSE_STAG			BIT(30)
++#define MSCC_MS_SAM_CP_TAG_PARSE_QTAG			BIT(31)
++
++/* MACSEC_SAM_NM_FLOW_NCP */
++#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_FLOW_TYPE(x)	(x)
++#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DEST_PORT(x)	((x) << 2)
++#define MSCC_MS_SAM_NM_FLOW_NCP_UNTAGGED_DROP_ACTION(x)	((x) << 6)
++#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_FLOW_TYPE(x)	((x) << 8)
++#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DEST_PORT(x)	((x) << 10)
++#define MSCC_MS_SAM_NM_FLOW_NCP_TAGGED_DROP_ACTION(x)	((x) << 14)
++#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_FLOW_TYPE(x)	((x) << 16)
++#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DEST_PORT(x)	((x) << 18)
++#define MSCC_MS_SAM_NM_FLOW_NCP_BADTAG_DROP_ACTION(x)	((x) << 22)
++#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_FLOW_TYPE(x)	((x) << 24)
++#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_DEST_PORT(x)	((x) << 26)
++#define MSCC_MS_SAM_NM_FLOW_NCP_KAY_DROP_ACTION(x)	((x) << 30)
++
++/* MACSEC_SAM_NM_FLOW_CP */
++#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_FLOW_TYPE(x)	(x)
++#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DEST_PORT(x)	((x) << 2)
++#define MSCC_MS_SAM_NM_FLOW_CP_UNTAGGED_DROP_ACTION(x)	((x) << 6)
++#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_FLOW_TYPE(x)	((x) << 8)
++#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DEST_PORT(x)	((x) << 10)
++#define MSCC_MS_SAM_NM_FLOW_CP_TAGGED_DROP_ACTION(x)	((x) << 14)
++#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_FLOW_TYPE(x)	((x) << 16)
++#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DEST_PORT(x)	((x) << 18)
++#define MSCC_MS_SAM_NM_FLOW_CP_BADTAG_DROP_ACTION(x)	((x) << 22)
++#define MSCC_MS_SAM_NM_FLOW_CP_KAY_FLOW_TYPE(x)		((x) << 24)
++#define MSCC_MS_SAM_NM_FLOW_CP_KAY_DEST_PORT(x)		((x) << 26)
++#define MSCC_MS_SAM_NM_FLOW_CP_KAY_DROP_ACTION(x)	((x) << 30)
++
++/* MACSEC_MISC_CONTROL */
++#define MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX(x)		(x)
++#define MSCC_MS_MISC_CONTROL_MC_LATENCY_FIX_M		GENMASK(5, 0)
++#define MSCC_MS_MISC_CONTROL_STATIC_BYPASS		BIT(8)
++#define MSCC_MS_MISC_CONTROL_NM_MACSEC_EN		BIT(9)
++#define MSCC_MS_MISC_CONTROL_VALIDATE_FRAMES(x)		((x) << 10)
++#define MSCC_MS_MISC_CONTROL_VALIDATE_FRAMES_M		GENMASK(11, 10)
++#define MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE(x)		((x) << 24)
++#define MSCC_MS_MISC_CONTROL_XFORM_REC_SIZE_M		GENMASK(25, 24)
++
++/* MACSEC_COUNT_CONTROL */
++#define MSCC_MS_COUNT_CONTROL_RESET_ALL			BIT(0)
++#define MSCC_MS_COUNT_CONTROL_DEBUG_ACCESS		BIT(1)
++#define MSCC_MS_COUNT_CONTROL_SATURATE_CNTRS		BIT(2)
++#define MSCC_MS_COUNT_CONTROL_AUTO_CNTR_RESET		BIT(3)
++
++/* MACSEC_PARAMS2_IG_CC_CONTROL */
++#define MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_CTRL_ACT	BIT(14)
++#define MSCC_MS_PARAMS2_IG_CC_CONTROL_NON_MATCH_ACT	BIT(15)
++
++/* MACSEC_PARAMS2_IG_CP_TAG */
++#define MSCC_MS_PARAMS2_IG_CP_TAG_MAP_TBL(x)		(x)
++#define MSCC_MS_PARAMS2_IG_CP_TAG_MAP_TBL_M		GENMASK(23, 0)
++#define MSCC_MS_PARAMS2_IG_CP_TAG_DEF_UP(x)		((x) << 24)
++#define MSCC_MS_PARAMS2_IG_CP_TAG_DEF_UP_M		GENMASK(26, 24)
++#define MSCC_MS_PARAMS2_IG_CP_TAG_STAG_UP_EN		BIT(27)
++#define MSCC_MS_PARAMS2_IG_CP_TAG_QTAG_UP_EN		BIT(28)
++#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QINQ		BIT(29)
++#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_STAG		BIT(30)
++#define MSCC_MS_PARAMS2_IG_CP_TAG_PARSE_QTAG		BIT(31)
++
++/* MACSEC_VLAN_MTU_CHECK */
++#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE(x)		(x)
++#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMPARE_M		GENMASK(14, 0)
++#define MSCC_MS_VLAN_MTU_CHECK_MTU_COMP_DROP		BIT(15)
++
++/* MACSEC_NON_VLAN_MTU_CHECK */
++#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE(x)	(x)
++#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMPARE_M	GENMASK(14, 0)
++#define MSCC_MS_NON_VLAN_MTU_CHECK_NV_MTU_COMP_DROP	BIT(15)
++
++/* MACSEC_PP_CTRL */
++#define MSCC_MS_PP_CTRL_MACSEC_OCTET_INCR_MODE		BIT(0)
++
++/* MACSEC_INTR_CTRL_STATUS */
++#define MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS(x)	(x)
++#define MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M	GENMASK(15, 0)
++#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(x)		((x) << 16)
++#define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M		GENMASK(31, 16)
++
++#endif
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1758-08-v5.18-net-phy-mscc-macsec-support.patch b/target/linux/mediatek/patches-5.4/999-1758-08-v5.18-net-phy-mscc-macsec-support.patch
new file mode 100644
index 0000000..9c7ea17
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1758-08-v5.18-net-phy-mscc-macsec-support.patch
@@ -0,0 +1,786 @@
+From 28c5107aa904ef9db6b023039d20b6b4c4181675 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:46 +0100
+Subject: net: phy: mscc: macsec support
+
+This patch adds MACsec offloading support to some Microsemi PHYs, to
+configure flows and transformations so that matched packets can be
+processed by the MACsec engine, either at egress, or at ingress.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/phy/Kconfig       |   3 +
+ drivers/net/phy/mscc.c        | 691 ++++++++++++++++++++++++++++++++++++++++++
+ drivers/net/phy/mscc_macsec.h |   4 +
+ 3 files changed, 698 insertions(+)
+
+diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
+index 2e016271e1268..ac82ff959b7c0 100644
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -437,6 +437,9 @@ config MICROCHIP_T1_PHY
+ 
+ config MICROSEMI_PHY
+ 	tristate "Microsemi PHYs"
++	depends on MACSEC || MACSEC=n
++	select CRYPTO_AES
++	select CRYPTO_ECB
+ 	---help---
+ 	  Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
+ 
+diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
+index 8579a59a1336a..ccf17818570f9 100644
+--- a/drivers/net/phy/mscc.c
++++ b/drivers/net/phy/mscc.c
+@@ -18,6 +18,13 @@
+ #include <linux/netdevice.h>
+ #include <dt-bindings/net/mscc-phy-vsc8531.h>
+ 
++#include <linux/scatterlist.h>
++#include <crypto/skcipher.h>
++
++#if IS_ENABLED(CONFIG_MACSEC)
++#include <net/macsec.h>
++#endif
++
+ #include "mscc_macsec.h"
+ #include "mscc_mac.h"
+ #include "mscc_fc_buffer.h"
+@@ -436,6 +443,44 @@ static const struct vsc85xx_hw_stat vsc8584_hw_stats[] = {
+ 	},
+ };
+ 
++#if IS_ENABLED(CONFIG_MACSEC)
++struct macsec_flow {
++	struct list_head list;
++	enum mscc_macsec_destination_ports port;
++	enum macsec_bank bank;
++	u32 index;
++	int assoc_num;
++	bool has_transformation;
++
++	/* Highest takes precedence [0..15] */
++	u8 priority;
++
++	u8 key[MACSEC_KEYID_LEN];
++
++	union {
++		struct macsec_rx_sa *rx_sa;
++		struct macsec_tx_sa *tx_sa;
++	};
++
++	/* Matching */
++	struct {
++		u8 sci:1;
++		u8 tagged:1;
++		u8 untagged:1;
++		u8 etype:1;
++	} match;
++
++	u16 etype;
++
++	/* Action */
++	struct {
++		u8 bypass:1;
++		u8 drop:1;
++	} action;
++
++};
++#endif
++
+ struct vsc8531_private {
+ 	int rate_magic;
+ 	u16 supp_led_modes;
+@@ -449,6 +494,19 @@ struct vsc8531_private {
+ 	 * package.
+ 	 */
+ 	unsigned int base_addr;
++
++#if IS_ENABLED(CONFIG_MACSEC)
++	/* MACsec fields:
++	 * - One SecY per device (enforced at the s/w implementation level)
++	 * - macsec_flows: list of h/w flows
++	 * - ingr_flows: bitmap of ingress flows
++	 * - egr_flows: bitmap of egress flows
++	 */
++	struct macsec_secy *secy;
++	struct list_head macsec_flows;
++	unsigned long ingr_flows;
++	unsigned long egr_flows;
++#endif
+ };
+ 
+ #ifdef CONFIG_OF_MDIO
+@@ -1951,6 +2009,634 @@ static int vsc8584_macsec_init(struct phy_device *phydev)
+ 
+ 	return 0;
+ }
++
++static void vsc8584_macsec_flow(struct phy_device *phydev,
++				struct macsec_flow *flow)
++{
++	struct vsc8531_private *priv = phydev->priv;
++	enum macsec_bank bank = flow->bank;
++	u32 val, match = 0, mask = 0, action = 0, idx = flow->index;
++
++	if (flow->match.tagged)
++		match |= MSCC_MS_SAM_MISC_MATCH_TAGGED;
++	if (flow->match.untagged)
++		match |= MSCC_MS_SAM_MISC_MATCH_UNTAGGED;
++
++	if (bank == MACSEC_INGR && flow->assoc_num >= 0) {
++		match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num);
++		mask |= MSCC_MS_SAM_MASK_AN_MASK(0x3);
++	}
++
++	if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
++		match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
++		mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
++			MSCC_MS_SAM_MASK_SCI_MASK;
++
++		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
++					 lower_32_bits(flow->rx_sa->sc->sci));
++		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
++					 upper_32_bits(flow->rx_sa->sc->sci));
++	}
++
++	if (flow->match.etype) {
++		mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;
++
++		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
++					 MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype)));
++	}
++
++	match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);
++
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MISC_MATCH(idx), match);
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MASK(idx), mask);
++
++	/* Action for matching packets */
++	if (flow->action.drop)
++		action = MSCC_MS_FLOW_DROP;
++	else if (flow->action.bypass || flow->port == MSCC_MS_PORT_UNCONTROLLED)
++		action = MSCC_MS_FLOW_BYPASS;
++	else
++		action = (bank == MACSEC_INGR) ?
++			 MSCC_MS_FLOW_INGRESS : MSCC_MS_FLOW_EGRESS;
++
++	val = MSCC_MS_SAM_FLOW_CTRL_FLOW_TYPE(action) |
++	      MSCC_MS_SAM_FLOW_CTRL_DROP_ACTION(MSCC_MS_ACTION_DROP) |
++	      MSCC_MS_SAM_FLOW_CTRL_DEST_PORT(flow->port);
++
++	if (action == MSCC_MS_FLOW_BYPASS)
++		goto write_ctrl;
++
++	if (bank == MACSEC_INGR) {
++		if (priv->secy->replay_protect)
++			val |= MSCC_MS_SAM_FLOW_CTRL_REPLAY_PROTECT;
++		if (priv->secy->validate_frames == MACSEC_VALIDATE_STRICT)
++			val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_STRICT);
++		else if (priv->secy->validate_frames == MACSEC_VALIDATE_CHECK)
++			val |= MSCC_MS_SAM_FLOW_CTRL_VALIDATE_FRAMES(MSCC_MS_VALIDATE_CHECK);
++	} else if (bank == MACSEC_EGR) {
++		if (priv->secy->protect_frames)
++			val |= MSCC_MS_SAM_FLOW_CTRL_PROTECT_FRAME;
++		if (priv->secy->tx_sc.encrypt)
++			val |= MSCC_MS_SAM_FLOW_CTRL_CONF_PROTECT;
++		if (priv->secy->tx_sc.send_sci)
++			val |= MSCC_MS_SAM_FLOW_CTRL_INCLUDE_SCI;
++	}
++
++write_ctrl:
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
++}
++
++static struct macsec_flow *vsc8584_macsec_find_flow(struct macsec_context *ctx,
++						    enum macsec_bank bank)
++{
++	struct vsc8531_private *priv = ctx->phydev->priv;
++	struct macsec_flow *pos, *tmp;
++
++	list_for_each_entry_safe(pos, tmp, &priv->macsec_flows, list)
++		if (pos->assoc_num == ctx->sa.assoc_num && pos->bank == bank)
++			return pos;
++
++	return ERR_PTR(-ENOENT);
++}
++
++static void vsc8584_macsec_flow_enable(struct phy_device *phydev,
++				       struct macsec_flow *flow)
++{
++	enum macsec_bank bank = flow->bank;
++	u32 val, idx = flow->index;
++
++	if ((flow->bank == MACSEC_INGR && flow->rx_sa && !flow->rx_sa->active) ||
++	    (flow->bank == MACSEC_EGR && flow->tx_sa && !flow->tx_sa->active))
++		return;
++
++	/* Enable */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_SET1, BIT(idx));
++
++	/* Set in-use */
++	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
++	val |= MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
++}
++
++static void vsc8584_macsec_flow_disable(struct phy_device *phydev,
++					struct macsec_flow *flow)
++{
++	enum macsec_bank bank = flow->bank;
++	u32 val, idx = flow->index;
++
++	/* Disable */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_ENTRY_CLEAR1, BIT(idx));
++
++	/* Clear in-use */
++	val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx));
++	val &= ~MSCC_MS_SAM_FLOW_CTRL_SA_IN_USE;
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_FLOW_CTRL(idx), val);
++}
++
++static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
++{
++	if (flow->bank == MACSEC_INGR)
++		return flow->index + MSCC_MS_MAX_FLOWS;
++
++	return flow->index;
++}
++
++/* Derive the AES key to get a key for the hash authentication */
++static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
++				     u16 key_len, u8 hkey[16])
++{
++	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
++	struct skcipher_request *req = NULL;
++	struct scatterlist src, dst;
++	DECLARE_CRYPTO_WAIT(wait);
++	u32 input[4] = {0};
++	int ret;
++
++	if (IS_ERR(tfm))
++		return PTR_ERR(tfm);
++
++	req = skcipher_request_alloc(tfm, GFP_KERNEL);
++	if (!req) {
++		ret = -ENOMEM;
++		goto out;
++	}
++
++	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
++				      CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
++				      &wait);
++	ret = crypto_skcipher_setkey(tfm, key, key_len);
++	if (ret < 0)
++		goto out;
++
++	sg_init_one(&src, input, 16);
++	sg_init_one(&dst, hkey, 16);
++	skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
++
++	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
++
++out:
++	skcipher_request_free(req);
++	crypto_free_skcipher(tfm);
++	return ret;
++}
++
++static int vsc8584_macsec_transformation(struct phy_device *phydev,
++					 struct macsec_flow *flow)
++{
++	struct vsc8531_private *priv = phydev->priv;
++	enum macsec_bank bank = flow->bank;
++	int i, ret, index = flow->index;
++	u32 rec = 0, control = 0;
++	u8 hkey[16];
++	sci_t sci;
++
++	ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
++	if (ret)
++		return ret;
++
++	switch (priv->secy->key_len) {
++	case 16:
++		control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_128);
++		break;
++	case 32:
++		control |= CONTROL_CRYPTO_ALG(CTRYPTO_ALG_AES_CTR_256);
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	control |= (bank == MACSEC_EGR) ?
++		   (CONTROL_TYPE_EGRESS | CONTROL_AN(priv->secy->tx_sc.encoding_sa)) :
++		   (CONTROL_TYPE_INGRESS | CONTROL_SEQ_MASK);
++
++	control |= CONTROL_UPDATE_SEQ | CONTROL_ENCRYPT_AUTH | CONTROL_KEY_IN_CTX |
++		   CONTROL_IV0 | CONTROL_IV1 | CONTROL_IV_IN_SEQ |
++		   CONTROL_DIGEST_TYPE(0x2) | CONTROL_SEQ_TYPE(0x1) |
++		   CONTROL_AUTH_ALG(AUTH_ALG_AES_GHAS) | CONTROL_CONTEXT_ID;
++
++	/* Set the control word */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
++				 control);
++
++	/* Set the context ID. Must be unique. */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
++				 vsc8584_macsec_flow_context_id(flow));
++
++	/* Set the encryption/decryption key */
++	for (i = 0; i < priv->secy->key_len / sizeof(u32); i++)
++		vsc8584_macsec_phy_write(phydev, bank,
++					 MSCC_MS_XFORM_REC(index, rec++),
++					 ((u32 *)flow->key)[i]);
++
++	/* Set the authentication key */
++	for (i = 0; i < 4; i++)
++		vsc8584_macsec_phy_write(phydev, bank,
++					 MSCC_MS_XFORM_REC(index, rec++),
++					 ((u32 *)hkey)[i]);
++
++	/* Initial sequence number */
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
++				 bank == MACSEC_INGR ?
++				 flow->rx_sa->next_pn : flow->tx_sa->next_pn);
++
++	if (bank == MACSEC_INGR)
++		/* Set the mask (replay window size) */
++		vsc8584_macsec_phy_write(phydev, bank,
++					 MSCC_MS_XFORM_REC(index, rec++),
++					 priv->secy->replay_window);
++
++	/* Set the input vectors */
++	sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci;
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
++				 lower_32_bits(sci));
++	vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
++				 upper_32_bits(sci));
++
++	while (rec < 20)
++		vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
++					 0);
++
++	flow->has_transformation = true;
++	return 0;
++}
++
++static struct macsec_flow *vsc8584_macsec_alloc_flow(struct vsc8531_private *priv,
++						     enum macsec_bank bank)
++{
++	unsigned long *bitmap = bank == MACSEC_INGR ?
++				&priv->ingr_flows : &priv->egr_flows;
++	struct macsec_flow *flow;
++	int index;
++
++	index = find_first_zero_bit(bitmap, MSCC_MS_MAX_FLOWS);
++
++	if (index == MSCC_MS_MAX_FLOWS)
++		return ERR_PTR(-ENOMEM);
++
++	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
++	if (!flow)
++		return ERR_PTR(-ENOMEM);
++
++	set_bit(index, bitmap);
++	flow->index = index;
++	flow->bank = bank;
++	flow->priority = 8;
++	flow->assoc_num = -1;
++
++	list_add_tail(&flow->list, &priv->macsec_flows);
++	return flow;
++}
++
++static void vsc8584_macsec_free_flow(struct vsc8531_private *priv,
++				     struct macsec_flow *flow)
++{
++	unsigned long *bitmap = flow->bank == MACSEC_INGR ?
++				&priv->ingr_flows : &priv->egr_flows;
++
++	list_del(&flow->list);
++	clear_bit(flow->index, bitmap);
++	kfree(flow);
++}
++
++static int vsc8584_macsec_add_flow(struct phy_device *phydev,
++				   struct macsec_flow *flow, bool update)
++{
++	int ret;
++
++	flow->port = MSCC_MS_PORT_CONTROLLED;
++	vsc8584_macsec_flow(phydev, flow);
++
++	if (update)
++		return 0;
++
++	ret = vsc8584_macsec_transformation(phydev, flow);
++	if (ret) {
++		vsc8584_macsec_free_flow(phydev->priv, flow);
++		return ret;
++	}
++
++	return 0;
++}
++
++static int vsc8584_macsec_default_flows(struct phy_device *phydev)
++{
++	struct macsec_flow *flow;
++
++	/* Add a rule to let the MKA traffic go through, ingress */
++	flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_INGR);
++	if (IS_ERR(flow))
++		return PTR_ERR(flow);
++
++	flow->priority = 15;
++	flow->port = MSCC_MS_PORT_UNCONTROLLED;
++	flow->match.tagged = 1;
++	flow->match.untagged = 1;
++	flow->match.etype = 1;
++	flow->etype = ETH_P_PAE;
++	flow->action.bypass = 1;
++
++	vsc8584_macsec_flow(phydev, flow);
++	vsc8584_macsec_flow_enable(phydev, flow);
++
++	/* Add a rule to let the MKA traffic go through, egress */
++	flow = vsc8584_macsec_alloc_flow(phydev->priv, MACSEC_EGR);
++	if (IS_ERR(flow))
++		return PTR_ERR(flow);
++
++	flow->priority = 15;
++	flow->port = MSCC_MS_PORT_COMMON;
++	flow->match.untagged = 1;
++	flow->match.etype = 1;
++	flow->etype = ETH_P_PAE;
++	flow->action.bypass = 1;
++
++	vsc8584_macsec_flow(phydev, flow);
++	vsc8584_macsec_flow_enable(phydev, flow);
++
++	return 0;
++}
++
++static void vsc8584_macsec_del_flow(struct phy_device *phydev,
++				    struct macsec_flow *flow)
++{
++	vsc8584_macsec_flow_disable(phydev, flow);
++	vsc8584_macsec_free_flow(phydev->priv, flow);
++}
++
++static int __vsc8584_macsec_add_rxsa(struct macsec_context *ctx,
++				     struct macsec_flow *flow, bool update)
++{
++	struct phy_device *phydev = ctx->phydev;
++	struct vsc8531_private *priv = phydev->priv;
++
++	if (!flow) {
++		flow = vsc8584_macsec_alloc_flow(priv, MACSEC_INGR);
++		if (IS_ERR(flow))
++			return PTR_ERR(flow);
++
++		memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
++	}
++
++	flow->assoc_num = ctx->sa.assoc_num;
++	flow->rx_sa = ctx->sa.rx_sa;
++
++	/* Always match tagged packets on ingress */
++	flow->match.tagged = 1;
++	flow->match.sci = 1;
++
++	if (priv->secy->validate_frames != MACSEC_VALIDATE_DISABLED)
++		flow->match.untagged = 1;
++
++	return vsc8584_macsec_add_flow(phydev, flow, update);
++}
++
++static int __vsc8584_macsec_add_txsa(struct macsec_context *ctx,
++				     struct macsec_flow *flow, bool update)
++{
++	struct phy_device *phydev = ctx->phydev;
++	struct vsc8531_private *priv = phydev->priv;
++
++	if (!flow) {
++		flow = vsc8584_macsec_alloc_flow(priv, MACSEC_EGR);
++		if (IS_ERR(flow))
++			return PTR_ERR(flow);
++
++		memcpy(flow->key, ctx->sa.key, priv->secy->key_len);
++	}
++
++	flow->assoc_num = ctx->sa.assoc_num;
++	flow->tx_sa = ctx->sa.tx_sa;
++
++	/* Always match untagged packets on egress */
++	flow->match.untagged = 1;
++
++	return vsc8584_macsec_add_flow(phydev, flow, update);
++}
++
++static int vsc8584_macsec_dev_open(struct macsec_context *ctx)
++{
++	struct vsc8531_private *priv = ctx->phydev->priv;
++	struct macsec_flow *flow, *tmp;
++
++	/* No operation to perform before the commit step */
++	if (ctx->prepare)
++		return 0;
++
++	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
++		vsc8584_macsec_flow_enable(ctx->phydev, flow);
++
++	return 0;
++}
++
++static int vsc8584_macsec_dev_stop(struct macsec_context *ctx)
++{
++	struct vsc8531_private *priv = ctx->phydev->priv;
++	struct macsec_flow *flow, *tmp;
++
++	/* No operation to perform before the commit step */
++	if (ctx->prepare)
++		return 0;
++
++	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
++		vsc8584_macsec_flow_disable(ctx->phydev, flow);
++
++	return 0;
++}
++
++static int vsc8584_macsec_add_secy(struct macsec_context *ctx)
++{
++	struct vsc8531_private *priv = ctx->phydev->priv;
++	struct macsec_secy *secy = ctx->secy;
++
++	if (ctx->prepare) {
++		if (priv->secy)
++			return -EEXIST;
++
++		return 0;
++	}
++
++	priv->secy = secy;
++
++	vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR,
++					   secy->validate_frames != MACSEC_VALIDATE_DISABLED);
++	vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR,
++					   secy->validate_frames != MACSEC_VALIDATE_DISABLED);
++
++	return vsc8584_macsec_default_flows(ctx->phydev);
++}
++
++static int vsc8584_macsec_del_secy(struct macsec_context *ctx)
++{
++	struct vsc8531_private *priv = ctx->phydev->priv;
++	struct macsec_flow *flow, *tmp;
++
++	/* No operation to perform before the commit step */
++	if (ctx->prepare)
++		return 0;
++
++	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list)
++		vsc8584_macsec_del_flow(ctx->phydev, flow);
++
++	vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_EGR, false);
++	vsc8584_macsec_flow_default_action(ctx->phydev, MACSEC_INGR, false);
++
++	priv->secy = NULL;
++	return 0;
++}
++
++static int vsc8584_macsec_upd_secy(struct macsec_context *ctx)
++{
++	/* No operation to perform before the commit step */
++	if (ctx->prepare)
++		return 0;
++
++	vsc8584_macsec_del_secy(ctx);
++	return vsc8584_macsec_add_secy(ctx);
++}
++
++static int vsc8584_macsec_add_rxsc(struct macsec_context *ctx)
++{
++	/* Nothing to do */
++	return 0;
++}
++
++static int vsc8584_macsec_upd_rxsc(struct macsec_context *ctx)
++{
++	return -EOPNOTSUPP;
++}
++
++static int vsc8584_macsec_del_rxsc(struct macsec_context *ctx)
++{
++	struct vsc8531_private *priv = ctx->phydev->priv;
++	struct macsec_flow *flow, *tmp;
++
++	/* No operation to perform before the commit step */
++	if (ctx->prepare)
++		return 0;
++
++	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
++		if (flow->bank == MACSEC_INGR && flow->rx_sa &&
++		    flow->rx_sa->sc->sci == ctx->rx_sc->sci)
++			vsc8584_macsec_del_flow(ctx->phydev, flow);
++	}
++
++	return 0;
++}
++
++static int vsc8584_macsec_add_rxsa(struct macsec_context *ctx)
++{
++	struct macsec_flow *flow = NULL;
++
++	if (ctx->prepare)
++		return __vsc8584_macsec_add_rxsa(ctx, flow, false);
++
++	flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
++	if (IS_ERR(flow))
++		return PTR_ERR(flow);
++
++	vsc8584_macsec_flow_enable(ctx->phydev, flow);
++	return 0;
++}
++
++static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
++{
++	struct macsec_flow *flow;
++
++	flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
++	if (IS_ERR(flow))
++		return PTR_ERR(flow);
++
++	if (ctx->prepare) {
++		/* Make sure the flow is disabled before updating it */
++		vsc8584_macsec_flow_disable(ctx->phydev, flow);
++
++		return __vsc8584_macsec_add_rxsa(ctx, flow, true);
++	}
++
++	vsc8584_macsec_flow_enable(ctx->phydev, flow);
++	return 0;
++}
++
++static int vsc8584_macsec_del_rxsa(struct macsec_context *ctx)
++{
++	struct macsec_flow *flow;
++
++	flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
++
++	if (IS_ERR(flow))
++		return PTR_ERR(flow);
++	if (ctx->prepare)
++		return 0;
++
++	vsc8584_macsec_del_flow(ctx->phydev, flow);
++	return 0;
++}
++
++static int vsc8584_macsec_add_txsa(struct macsec_context *ctx)
++{
++	struct macsec_flow *flow = NULL;
++
++	if (ctx->prepare)
++		return __vsc8584_macsec_add_txsa(ctx, flow, false);
++
++	flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
++	if (IS_ERR(flow))
++		return PTR_ERR(flow);
++
++	vsc8584_macsec_flow_enable(ctx->phydev, flow);
++	return 0;
++}
++
++static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
++{
++	struct macsec_flow *flow;
++
++	flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
++	if (IS_ERR(flow))
++		return PTR_ERR(flow);
++
++	if (ctx->prepare) {
++		/* Make sure the flow is disabled before updating it */
++		vsc8584_macsec_flow_disable(ctx->phydev, flow);
++
++		return __vsc8584_macsec_add_txsa(ctx, flow, true);
++	}
++
++	vsc8584_macsec_flow_enable(ctx->phydev, flow);
++	return 0;
++}
++
++static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
++{
++	struct macsec_flow *flow;
++
++	flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
++
++	if (IS_ERR(flow))
++		return PTR_ERR(flow);
++	if (ctx->prepare)
++		return 0;
++
++	vsc8584_macsec_del_flow(ctx->phydev, flow);
++	return 0;
++}
++
++static struct macsec_ops vsc8584_macsec_ops = {
++	.mdo_dev_open = vsc8584_macsec_dev_open,
++	.mdo_dev_stop = vsc8584_macsec_dev_stop,
++	.mdo_add_secy = vsc8584_macsec_add_secy,
++	.mdo_upd_secy = vsc8584_macsec_upd_secy,
++	.mdo_del_secy = vsc8584_macsec_del_secy,
++	.mdo_add_rxsc = vsc8584_macsec_add_rxsc,
++	.mdo_upd_rxsc = vsc8584_macsec_upd_rxsc,
++	.mdo_del_rxsc = vsc8584_macsec_del_rxsc,
++	.mdo_add_rxsa = vsc8584_macsec_add_rxsa,
++	.mdo_upd_rxsa = vsc8584_macsec_upd_rxsa,
++	.mdo_del_rxsa = vsc8584_macsec_del_rxsa,
++	.mdo_add_txsa = vsc8584_macsec_add_txsa,
++	.mdo_upd_txsa = vsc8584_macsec_upd_txsa,
++	.mdo_del_txsa = vsc8584_macsec_del_txsa,
++};
+ #endif /* CONFIG_MACSEC */
+ 
+ /* Check if one PHY has already done the init of the parts common to all PHYs
+@@ -2109,6 +2795,11 @@ static int vsc8584_config_init(struct phy_device *phydev)
+ 	case PHY_ID_VSC8575:
+ 	case PHY_ID_VSC8582:
+ 	case PHY_ID_VSC8584:
++		INIT_LIST_HEAD(&vsc8531->macsec_flows);
++		vsc8531->secy = NULL;
++
++		phydev->macsec_ops = &vsc8584_macsec_ops;
++
+ 		ret = vsc8584_macsec_init(phydev);
+ 		if (ret)
+ 			goto err;
+diff --git a/drivers/net/phy/mscc_macsec.h b/drivers/net/phy/mscc_macsec.h
+index 0d108da28dad2..9b5d0af91d204 100644
+--- a/drivers/net/phy/mscc_macsec.h
++++ b/drivers/net/phy/mscc_macsec.h
+@@ -8,6 +8,8 @@
+ #ifndef _MSCC_OCELOT_MACSEC_H_
+ #define _MSCC_OCELOT_MACSEC_H_
+ 
++#define MSCC_MS_MAX_FLOWS		16
++
+ #define CONTROL_TYPE_EGRESS		0x6
+ #define CONTROL_TYPE_INGRESS		0xf
+ #define CONTROL_IV0			BIT(5)
+@@ -59,6 +61,8 @@ enum mscc_macsec_validate_levels {
+ #define MSCC_MS_XFORM_REC(x, y)		(((x) << 5) + (y))
+ #define MSCC_MS_ENA_CFG			0x800
+ #define MSCC_MS_FC_CFG			0x804
++#define MSCC_MS_SAM_MAC_SA_MATCH_LO(x)	(0x1000 + ((x) << 4))
++#define MSCC_MS_SAM_MAC_SA_MATCH_HI(x)	(0x1001 + ((x) << 4))
+ #define MSCC_MS_SAM_MISC_MATCH(x)	(0x1004 + ((x) << 4))
+ #define MSCC_MS_SAM_MATCH_SCI_LO(x)	(0x1005 + ((x) << 4))
+ #define MSCC_MS_SAM_MATCH_SCI_HI(x)	(0x1006 + ((x) << 4))
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1759-09-v5.18-net-macsec-PN-wrap-callback.patch b/target/linux/mediatek/patches-5.4/999-1759-09-v5.18-net-macsec-PN-wrap-callback.patch
new file mode 100644
index 0000000..75d7839
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1759-09-v5.18-net-macsec-PN-wrap-callback.patch
@@ -0,0 +1,72 @@
+From 5c937de78b39e47ce9924fc4b863c5b727edc328 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:47 +0100
+Subject: net: macsec: PN wrap callback
+
+Allow to call macsec_pn_wrapped from hardware drivers to notify when a
+PN rolls over. Some drivers might use an interrupt to implement this.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 25 +++++++++++++++++++------
+ include/net/macsec.h |  2 ++
+ 2 files changed, 21 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index e515919e8687f..45bfd99f17fa9 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -424,6 +424,23 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
+ 	return (struct macsec_eth_header *)skb_mac_header(skb);
+ }
+ 
++static void __macsec_pn_wrapped(struct macsec_secy *secy,
++				struct macsec_tx_sa *tx_sa)
++{
++	pr_debug("PN wrapped, transitioning to !oper\n");
++	tx_sa->active = false;
++	if (secy->protect_frames)
++		secy->operational = false;
++}
++
++void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
++{
++	spin_lock_bh(&tx_sa->lock);
++	__macsec_pn_wrapped(secy, tx_sa);
++	spin_unlock_bh(&tx_sa->lock);
++}
++EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
++
+ static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
+ {
+ 	u32 pn;
+@@ -432,12 +449,8 @@ static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
+ 	pn = tx_sa->next_pn;
+ 
+ 	tx_sa->next_pn++;
+-	if (tx_sa->next_pn == 0) {
+-		pr_debug("PN wrapped, transitioning to !oper\n");
+-		tx_sa->active = false;
+-		if (secy->protect_frames)
+-			secy->operational = false;
+-	}
++	if (tx_sa->next_pn == 0)
++		__macsec_pn_wrapped(secy, tx_sa);
+ 	spin_unlock_bh(&tx_sa->lock);
+ 
+ 	return pn;
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 16e7e5061178e..92e43db8b5667 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -219,4 +219,6 @@ struct macsec_ops {
+ 	int (*mdo_del_txsa)(struct macsec_context *ctx);
+ };
+ 
++void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
++
+ #endif /* _NET_MACSEC_H_ */
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1760-10-v5.18-net-phy-mscc-PN-rollover-support.patch b/target/linux/mediatek/patches-5.4/999-1760-10-v5.18-net-phy-mscc-PN-rollover-support.patch
new file mode 100644
index 0000000..2c9bef6
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1760-10-v5.18-net-phy-mscc-PN-rollover-support.patch
@@ -0,0 +1,129 @@
+From 781449a4ae3b381950ee9aec4d8a54e35f66ab9b Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Mon, 13 Jan 2020 23:31:48 +0100
+Subject: net: phy: mscc: PN rollover support
+
+This patch adds support for handling MACsec PN rollover in the mscc PHY
+driver. When a flow rolls over, an interrupt is fired. This patch adds
+the logic to check all flows and identify the one rolling over in the
+handle_interrupt PHY helper, then disables the flow and report the event
+to the MACsec core.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/phy/mscc.c        | 60 ++++++++++++++++++++++++++++++++++++++++++-
+ drivers/net/phy/mscc_macsec.h |  2 ++
+ 2 files changed, 61 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/phy/mscc.c
++++ b/drivers/net/phy/mscc.c
+@@ -80,7 +80,7 @@ enum rgmii_rx_clock_delay {
+ #define MSCC_PHY_EXT_PHY_CNTL_2		  24
+ 
+ #define MII_VSC85XX_INT_MASK		  25
+-#define MII_VSC85XX_INT_MASK_MASK	  0xa000
++#define MII_VSC85XX_INT_MASK_MASK	  0xa020
+ #define MII_VSC85XX_INT_MASK_WOL	  0x0040
+ #define MII_VSC85XX_INT_STATUS		  26
+ 
+@@ -207,6 +207,9 @@ enum macsec_bank {
+ #define SECURE_ON_ENABLE		  0x8000
+ #define SECURE_ON_PASSWD_LEN_4		  0x4000
+ 
++#define MSCC_PHY_EXTENDED_INT		  28
++#define MSCC_PHY_EXTENDED_INT_MS_EGR	  BIT(9)
++
+ /* Extended Page 3 Registers */
+ #define MSCC_PHY_SERDES_TX_VALID_CNT	  21
+ #define MSCC_PHY_SERDES_TX_CRC_ERR_CNT	  22
+@@ -2805,6 +2808,43 @@ err:
+ 	return ret;
+ }
+ 
++static int vsc8584_handle_interrupt(struct phy_device *phydev)
++{
++#if IS_ENABLED(CONFIG_MACSEC)
++	struct vsc8531_private *priv = phydev->priv;
++	struct macsec_flow *flow, *tmp;
++	u32 cause, rec;
++
++	/* Check MACsec PN rollover */
++	cause = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
++					MSCC_MS_INTR_CTRL_STATUS);
++	cause &= MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M;
++	if (!(cause & MACSEC_INTR_CTRL_STATUS_ROLLOVER))
++		goto skip_rollover;
++
++	rec = 6 + priv->secy->key_len / sizeof(u32);
++	list_for_each_entry_safe(flow, tmp, &priv->macsec_flows, list) {
++		u32 val;
++
++		if (flow->bank != MACSEC_EGR || !flow->has_transformation)
++			continue;
++
++		val = vsc8584_macsec_phy_read(phydev, MACSEC_EGR,
++					      MSCC_MS_XFORM_REC(flow->index, rec));
++		if (val == 0xffffffff) {
++			vsc8584_macsec_flow_disable(phydev, flow);
++			macsec_pn_wrapped(priv->secy, flow->tx_sa);
++			break;
++		}
++	}
++
++skip_rollover:
++#endif
++
++	phy_mac_interrupt(phydev);
++	return 0;
++}
++
+ static int vsc85xx_config_init(struct phy_device *phydev)
+ {
+ 	int rc, i, phy_id;
+@@ -3248,6 +3288,20 @@ static int vsc85xx_config_intr(struct ph
+ 	int rc;
+ 
+ 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
++#if IS_ENABLED(CONFIG_MACSEC)
++		phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
++			  MSCC_PHY_PAGE_EXTENDED_2);
++		phy_write(phydev, MSCC_PHY_EXTENDED_INT,
++			  MSCC_PHY_EXTENDED_INT_MS_EGR);
++		phy_write(phydev, MSCC_EXT_PAGE_ACCESS,
++			  MSCC_PHY_PAGE_STANDARD);
++
++		vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
++					 MSCC_MS_AIC_CTRL, 0xf);
++		vsc8584_macsec_phy_write(phydev, MACSEC_EGR,
++			MSCC_MS_INTR_CTRL_STATUS,
++			MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(MACSEC_INTR_CTRL_STATUS_ROLLOVER));
++#endif
+ 		rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
+ 			       MII_VSC85XX_INT_MASK_MASK);
+ 	} else {
+@@ -3553,6 +3607,7 @@ static struct phy_driver vsc85xx_driver[
+ 	.config_aneg    = &vsc85xx_config_aneg,
+ 	.aneg_done	= &genphy_aneg_done,
+ 	.read_status	= &vsc85xx_read_status,
++	.handle_interrupt = &vsc8584_handle_interrupt,
+ 	.ack_interrupt  = &vsc85xx_ack_interrupt,
+ 	.config_intr    = &vsc85xx_config_intr,
+ 	.did_interrupt  = &vsc8584_did_interrupt,
+--- a/drivers/net/phy/mscc_macsec.h
++++ b/drivers/net/phy/mscc_macsec.h
+@@ -83,6 +83,7 @@ enum mscc_macsec_validate_levels {
+ #define MSCC_MS_STATUS_CONTEXT_CTRL	0x3d02
+ #define MSCC_MS_INTR_CTRL_STATUS	0x3d04
+ #define MSCC_MS_BLOCK_CTX_UPDATE	0x3d0c
++#define MSCC_MS_AIC_CTRL		0x3e02
+ 
+ /* MACSEC_ENA_CFG */
+ #define MSCC_MS_ENA_CFG_CLK_ENA				BIT(0)
+@@ -260,5 +261,6 @@ enum mscc_macsec_validate_levels {
+ #define MSCC_MS_INTR_CTRL_STATUS_INTR_CLR_STATUS_M	GENMASK(15, 0)
+ #define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE(x)		((x) << 16)
+ #define MSCC_MS_INTR_CTRL_STATUS_INTR_ENABLE_M		GENMASK(31, 16)
++#define MACSEC_INTR_CTRL_STATUS_ROLLOVER		BIT(5)
+ 
+ #endif
diff --git a/target/linux/mediatek/patches-5.4/999-1761-v5.18-net-macsec-invoke-mdo_upd_secy-callback-when-mac-address-changed.patch b/target/linux/mediatek/patches-5.4/999-1761-v5.18-net-macsec-invoke-mdo_upd_secy-callback-when-mac-address-changed.patch
new file mode 100644
index 0000000..ab7986b
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1761-v5.18-net-macsec-invoke-mdo_upd_secy-callback-when-mac-address-changed.patch
@@ -0,0 +1,44 @@
+From 09f4136c5d6b4c5144a965bd086009863d58ff08 Mon Sep 17 00:00:00 2001
+From: Dmitry Bogdanov <dbogdanov@marvell.com>
+Date: Tue, 10 Mar 2020 18:22:25 +0300
+Subject: net: macsec: invoke mdo_upd_secy callback when mac address changed
+
+Notify the offload engine about MAC address change to reconfigure it
+accordingly.
+
+Fixes: 3cf3227a21d1 ("net: macsec: hardware offloading infrastructure")
+Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 66c6392251bc3..6ec6fc191a6e4 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3274,6 +3274,19 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
+ out:
+ 	ether_addr_copy(dev->dev_addr, addr->sa_data);
+ 	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
++
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(macsec, &ctx);
++		if (ops) {
++			ctx.secy = &macsec->secy;
++			macsec_offload(ops->mdo_upd_secy, &ctx);
++		}
++	}
++
+ 	return 0;
+ }
+ 
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1762-v5.18-net-macsec-Support-XPN-frame-handling-IEEE-802.1AEbw.patch b/target/linux/mediatek/patches-5.4/999-1762-v5.18-net-macsec-Support-XPN-frame-handling-IEEE-802.1AEbw.patch
new file mode 100644
index 0000000..357977e
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1762-v5.18-net-macsec-Support-XPN-frame-handling-IEEE-802.1AEbw.patch
@@ -0,0 +1,479 @@
+From a21ecf0e033807b976967286e6c392f48ee2049f Mon Sep 17 00:00:00 2001
+From: Era Mayflower <mayflowerera@gmail.com>
+Date: Mon, 9 Mar 2020 19:47:01 +0000
+Subject: macsec: Support XPN frame handling - IEEE 802.1AEbw
+
+Support extended packet number cipher suites (802.1AEbw) frames handling.
+This does not include the needed netlink patches.
+
+    * Added xpn boolean field to `struct macsec_secy`.
+    * Added ssci field to `struct_macsec_tx_sa` (802.1AE figure 10-5).
+    * Added ssci field to `struct_macsec_rx_sa` (802.1AE figure 10-5).
+    * Added salt field to `struct macsec_key` (802.1AE 10.7 NOTE 1).
+    * Created pn_t type for easy access to lower and upper halves.
+    * Created salt_t type for easy access to the "ssci" and "pn" parts.
+    * Created `macsec_fill_iv_xpn` function to create IV in XPN mode.
+    * Support in PN recovery and preliminary replay check in XPN mode.
+
+In addition, according to IEEE 802.1AEbw figure 10-5, the PN of incoming
+frame can be 0 when XPN cipher suite is used, so fixed the function
+`macsec_validate_skb` to fail on PN=0 only if XPN is off.
+
+Signed-off-by: Era Mayflower <mayflowerera@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 130 +++++++++++++++++++++++++++++++++++++--------------
+ include/net/macsec.h |  45 ++++++++++++++++--
+ 2 files changed, 136 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 6ec6fc191a6e4..6c71e250cccb0 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -19,6 +19,7 @@
+ #include <net/gro_cells.h>
+ #include <net/macsec.h>
+ #include <linux/phy.h>
++#include <linux/byteorder/generic.h>
+ 
+ #include <uapi/linux/if_macsec.h>
+ 
+@@ -68,6 +69,16 @@ struct macsec_eth_header {
+ 	     sc;					\
+ 	     sc = rtnl_dereference(sc->next))
+ 
++#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
++
++struct gcm_iv_xpn {
++	union {
++		u8 short_secure_channel_id[4];
++		ssci_t ssci;
++	};
++	__be64 pn;
++} __packed;
++
+ struct gcm_iv {
+ 	union {
+ 		u8 secure_channel_id[8];
+@@ -372,8 +383,8 @@ static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
+ 	return __macsec_get_ops(macsec->offload, macsec, ctx);
+ }
+ 
+-/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
+-static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
++/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
++static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
+ {
+ 	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
+ 	int len = skb->len - 2 * ETH_ALEN;
+@@ -398,8 +409,8 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+ 	if (h->unused)
+ 		return false;
+ 
+-	/* rx.pn != 0 (figure 10-5) */
+-	if (!h->packet_number)
++	/* rx.pn != 0 if not XPN (figure 10-5 with 802.11AEbw-2013 amendment) */
++	if (!h->packet_number && !xpn)
+ 		return false;
+ 
+ 	/* length check, f) g) h) i) */
+@@ -411,6 +422,15 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+ #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
+ #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN
+ 
++static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
++			       salt_t salt)
++{
++	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
++
++	gcm_iv->ssci = ssci ^ salt.ssci;
++	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
++}
++
+ static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
+ {
+ 	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
+@@ -446,14 +466,19 @@ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
+ }
+ EXPORT_SYMBOL_GPL(macsec_pn_wrapped);
+ 
+-static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
++static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
++			    struct macsec_secy *secy)
+ {
+-	u32 pn;
++	pn_t pn;
+ 
+ 	spin_lock_bh(&tx_sa->lock);
+-	pn = tx_sa->next_pn;
+ 
+-	tx_sa->next_pn++;
++	pn = tx_sa->next_pn_halves;
++	if (secy->xpn)
++		tx_sa->next_pn++;
++	else
++		tx_sa->next_pn_halves.lower++;
++
+ 	if (tx_sa->next_pn == 0)
+ 		__macsec_pn_wrapped(secy, tx_sa);
+ 	spin_unlock_bh(&tx_sa->lock);
+@@ -568,7 +593,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+ 	struct macsec_tx_sa *tx_sa;
+ 	struct macsec_dev *macsec = macsec_priv(dev);
+ 	bool sci_present;
+-	u32 pn;
++	pn_t pn;
+ 
+ 	secy = &macsec->secy;
+ 	tx_sc = &secy->tx_sc;
+@@ -610,12 +635,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+ 	memmove(hh, eth, 2 * ETH_ALEN);
+ 
+ 	pn = tx_sa_update_pn(tx_sa, secy);
+-	if (pn == 0) {
++	if (pn.full64 == 0) {
+ 		macsec_txsa_put(tx_sa);
+ 		kfree_skb(skb);
+ 		return ERR_PTR(-ENOLINK);
+ 	}
+-	macsec_fill_sectag(hh, secy, pn, sci_present);
++	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
+ 	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
+ 
+ 	skb_put(skb, secy->icv_len);
+@@ -646,7 +671,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	macsec_fill_iv(iv, secy->sci, pn);
++	if (secy->xpn)
++		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
++	else
++		macsec_fill_iv(iv, secy->sci, pn.lower);
+ 
+ 	sg_init_table(sg, ret);
+ 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+@@ -698,13 +726,14 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
+ 	u32 lowest_pn = 0;
+ 
+ 	spin_lock(&rx_sa->lock);
+-	if (rx_sa->next_pn >= secy->replay_window)
+-		lowest_pn = rx_sa->next_pn - secy->replay_window;
++	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
++		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;
+ 
+ 	/* Now perform replay protection check again
+ 	 * (see IEEE 802.1AE-2006 figure 10-5)
+ 	 */
+-	if (secy->replay_protect && pn < lowest_pn) {
++	if (secy->replay_protect && pn < lowest_pn &&
++	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
+ 		spin_unlock(&rx_sa->lock);
+ 		u64_stats_update_begin(&rxsc_stats->syncp);
+ 		rxsc_stats->stats.InPktsLate++;
+@@ -753,8 +782,15 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
+ 		}
+ 		u64_stats_update_end(&rxsc_stats->syncp);
+ 
+-		if (pn >= rx_sa->next_pn)
+-			rx_sa->next_pn = pn + 1;
++		// Instead of "pn >=" - to support pn overflow in xpn
++		if (pn + 1 > rx_sa->next_pn_halves.lower) {
++			rx_sa->next_pn_halves.lower = pn + 1;
++		} else if (secy->xpn &&
++			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
++			rx_sa->next_pn_halves.upper++;
++			rx_sa->next_pn_halves.lower = pn + 1;
++		}
++
+ 		spin_unlock(&rx_sa->lock);
+ 	}
+ 
+@@ -841,6 +877,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
+ 	unsigned char *iv;
+ 	struct aead_request *req;
+ 	struct macsec_eth_header *hdr;
++	u32 hdr_pn;
+ 	u16 icv_len = secy->icv_len;
+ 
+ 	macsec_skb_cb(skb)->valid = false;
+@@ -860,7 +897,21 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
+ 	}
+ 
+ 	hdr = (struct macsec_eth_header *)skb->data;
+-	macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
++	hdr_pn = ntohl(hdr->packet_number);
++
++	if (secy->xpn) {
++		pn_t recovered_pn = rx_sa->next_pn_halves;
++
++		recovered_pn.lower = hdr_pn;
++		if (hdr_pn < rx_sa->next_pn_halves.lower &&
++		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
++			recovered_pn.upper++;
++
++		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
++				   rx_sa->key.salt);
++	} else {
++		macsec_fill_iv(iv, sci, hdr_pn);
++	}
+ 
+ 	sg_init_table(sg, ret);
+ 	ret = skb_to_sgvec(skb, sg, 0, skb->len);
+@@ -1001,7 +1052,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 	struct macsec_rxh_data *rxd;
+ 	struct macsec_dev *macsec;
+ 	sci_t sci;
+-	u32 pn;
++	u32 hdr_pn;
+ 	bool cbit;
+ 	struct pcpu_rx_sc_stats *rxsc_stats;
+ 	struct pcpu_secy_stats *secy_stats;
+@@ -1072,7 +1123,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 	secy_stats = this_cpu_ptr(macsec->stats);
+ 	rxsc_stats = this_cpu_ptr(rx_sc->stats);
+ 
+-	if (!macsec_validate_skb(skb, secy->icv_len)) {
++	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
+ 		u64_stats_update_begin(&secy_stats->syncp);
+ 		secy_stats->stats.InPktsBadTag++;
+ 		u64_stats_update_end(&secy_stats->syncp);
+@@ -1104,13 +1155,16 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 	}
+ 
+ 	/* First, PN check to avoid decrypting obviously wrong packets */
+-	pn = ntohl(hdr->packet_number);
++	hdr_pn = ntohl(hdr->packet_number);
+ 	if (secy->replay_protect) {
+ 		bool late;
+ 
+ 		spin_lock(&rx_sa->lock);
+-		late = rx_sa->next_pn >= secy->replay_window &&
+-		       pn < (rx_sa->next_pn - secy->replay_window);
++		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
++		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
++
++		if (secy->xpn)
++			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
+ 		spin_unlock(&rx_sa->lock);
+ 
+ 		if (late) {
+@@ -1139,7 +1193,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ 		return RX_HANDLER_CONSUMED;
+ 	}
+ 
+-	if (!macsec_post_decrypt(skb, secy, pn))
++	if (!macsec_post_decrypt(skb, secy, hdr_pn))
+ 		goto drop;
+ 
+ deliver:
+@@ -1666,7 +1720,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ 		spin_lock_bh(&rx_sa->lock);
+-		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
++		rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&rx_sa->lock);
+ 	}
+ 
+@@ -1873,7 +1927,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	spin_lock_bh(&tx_sa->lock);
+-	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
++	tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ 	spin_unlock_bh(&tx_sa->lock);
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+@@ -2137,9 +2191,11 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	u8 assoc_num;
+ 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ 	bool was_operational, was_active;
+-	u32 prev_pn = 0;
++	pn_t prev_pn;
+ 	int ret = 0;
+ 
++	prev_pn.full64 = 0;
++
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+ 
+@@ -2159,8 +2215,8 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ 		spin_lock_bh(&tx_sa->lock);
+-		prev_pn = tx_sa->next_pn;
+-		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
++		prev_pn = tx_sa->next_pn_halves;
++		tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&tx_sa->lock);
+ 	}
+ 
+@@ -2198,7 +2254,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ cleanup:
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ 		spin_lock_bh(&tx_sa->lock);
+-		tx_sa->next_pn = prev_pn;
++		tx_sa->next_pn_halves = prev_pn;
+ 		spin_unlock_bh(&tx_sa->lock);
+ 	}
+ 	tx_sa->active = was_active;
+@@ -2218,9 +2274,11 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ 	bool was_active;
+-	u32 prev_pn = 0;
++	pn_t prev_pn;
+ 	int ret = 0;
+ 
++	prev_pn.full64 = 0;
++
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+ 		return -EINVAL;
+ 
+@@ -2243,8 +2301,8 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ 		spin_lock_bh(&rx_sa->lock);
+-		prev_pn = rx_sa->next_pn;
+-		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
++		prev_pn = rx_sa->next_pn_halves;
++		rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&rx_sa->lock);
+ 	}
+ 
+@@ -2277,7 +2335,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ cleanup:
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ 		spin_lock_bh(&rx_sa->lock);
+-		rx_sa->next_pn = prev_pn;
++		rx_sa->next_pn_halves = prev_pn;
+ 		spin_unlock_bh(&rx_sa->lock);
+ 	}
+ 	rx_sa->active = was_active;
+@@ -2796,7 +2854,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 		}
+ 
+ 		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
+-		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
++		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn_halves.lower) ||
+ 		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
+ 		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
+ 			nla_nest_cancel(skb, txsa_nest);
+@@ -2900,7 +2958,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 			nla_nest_end(skb, attr);
+ 
+ 			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
+-			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
++			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn_halves.lower) ||
+ 			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
+ 			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
+ 				nla_nest_cancel(skb, rxsa_nest);
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 92e43db8b5667..43cd54e178770 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -11,18 +11,45 @@
+ #include <uapi/linux/if_link.h>
+ #include <uapi/linux/if_macsec.h>
+ 
++#define MACSEC_SALT_LEN 12
++#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
++
+ typedef u64 __bitwise sci_t;
++typedef u32 __bitwise ssci_t;
+ 
+-#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
++typedef union salt {
++	struct {
++		u32 ssci;
++		u64 pn;
++	} __packed;
++	u8 bytes[MACSEC_SALT_LEN];
++} __packed salt_t;
++
++typedef union pn {
++	struct {
++#if defined(__LITTLE_ENDIAN_BITFIELD)
++		u32 lower;
++		u32 upper;
++#elif defined(__BIG_ENDIAN_BITFIELD)
++		u32 upper;
++		u32 lower;
++#else
++#error	"Please fix <asm/byteorder.h>"
++#endif
++	};
++	u64 full64;
++} pn_t;
+ 
+ /**
+  * struct macsec_key - SA key
+  * @id: user-provided key identifier
+  * @tfm: crypto struct, key storage
++ * @salt: salt used to generate IV in XPN cipher suites
+  */
+ struct macsec_key {
+ 	u8 id[MACSEC_KEYID_LEN];
+ 	struct crypto_aead *tfm;
++	salt_t salt;
+ };
+ 
+ struct macsec_rx_sc_stats {
+@@ -64,12 +91,17 @@ struct macsec_tx_sc_stats {
+  * @next_pn: packet number expected for the next packet
+  * @lock: protects next_pn manipulations
+  * @key: key structure
++ * @ssci: short secure channel identifier
+  * @stats: per-SA stats
+  */
+ struct macsec_rx_sa {
+ 	struct macsec_key key;
++	ssci_t ssci;
+ 	spinlock_t lock;
+-	u32 next_pn;
++	union {
++		pn_t next_pn_halves;
++		u64 next_pn;
++	};
+ 	refcount_t refcnt;
+ 	bool active;
+ 	struct macsec_rx_sa_stats __percpu *stats;
+@@ -110,12 +142,17 @@ struct macsec_rx_sc {
+  * @next_pn: packet number to use for the next packet
+  * @lock: protects next_pn manipulations
+  * @key: key structure
++ * @ssci: short secure channel identifier
+  * @stats: per-SA stats
+  */
+ struct macsec_tx_sa {
+ 	struct macsec_key key;
++	ssci_t ssci;
+ 	spinlock_t lock;
+-	u32 next_pn;
++	union {
++		pn_t next_pn_halves;
++		u64 next_pn;
++	};
+ 	refcount_t refcnt;
+ 	bool active;
+ 	struct macsec_tx_sa_stats __percpu *stats;
+@@ -152,6 +189,7 @@ struct macsec_tx_sc {
+  * @key_len: length of keys used by the cipher suite
+  * @icv_len: length of ICV used by the cipher suite
+  * @validate_frames: validation mode
++ * @xpn: enable XPN for this SecY
+  * @operational: MAC_Operational flag
+  * @protect_frames: enable protection for this SecY
+  * @replay_protect: enable packet number checks on receive
+@@ -166,6 +204,7 @@ struct macsec_secy {
+ 	u16 key_len;
+ 	u16 icv_len;
+ 	enum macsec_validation_type validate_frames;
++	bool xpn;
+ 	bool operational;
+ 	bool protect_frames;
+ 	bool replay_protect;
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1763-v5.18-net-macsec-Netlink-support-of-XPN-cipher-suites-IEEE802.1AEbw.patch b/target/linux/mediatek/patches-5.4/999-1763-v5.18-net-macsec-Netlink-support-of-XPN-cipher-suites-IEEE802.1AEbw.patch
new file mode 100644
index 0000000..2d00be3
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1763-v5.18-net-macsec-Netlink-support-of-XPN-cipher-suites-IEEE802.1AEbw.patch
@@ -0,0 +1,452 @@
+From 48ef50fa866aae087f63c7de8a47e76537f88691 Mon Sep 17 00:00:00 2001
+From: Era Mayflower <mayflowerera@gmail.com>
+Date: Mon, 9 Mar 2020 19:47:02 +0000
+Subject: macsec: Netlink support of XPN cipher suites (IEEE 802.1AEbw)
+
+Netlink support of extended packet number cipher suites,
+allows adding and updating XPN macsec interfaces.
+
+Added support in:
+    * Creating interfaces with GCM-AES-XPN-128 and GCM-AES-XPN-256 suites.
+    * Setting and getting 64bit packet numbers with of SAs.
+    * Setting (only on SA creation) and getting ssci of SAs.
+    * Setting salt when installing a SAK.
+
+Added 2 cipher suite identifiers according to 802.1AE-2018 table 14-1:
+    * MACSEC_CIPHER_ID_GCM_AES_XPN_128
+    * MACSEC_CIPHER_ID_GCM_AES_XPN_256
+
+In addition, added 2 new netlink attribute types:
+    * MACSEC_SA_ATTR_SSCI
+    * MACSEC_SA_ATTR_SALT
+
+Depends on: macsec: Support XPN frame handling - IEEE 802.1AEbw.
+
+Signed-off-by: Era Mayflower <mayflowerera@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c           | 161 +++++++++++++++++++++++++++++++++++++----
+ include/net/macsec.h           |   3 +
+ include/uapi/linux/if_macsec.h |   8 +-
+ 3 files changed, 157 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 6c71e250cccb0..49b138e7aeac3 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -240,11 +240,13 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
+ #define MACSEC_PORT_ES (htons(0x0001))
+ #define MACSEC_PORT_SCB (0x0000)
+ #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
++#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)
+ 
+ #define MACSEC_GCM_AES_128_SAK_LEN 16
+ #define MACSEC_GCM_AES_256_SAK_LEN 32
+ 
+ #define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
++#define DEFAULT_XPN false
+ #define DEFAULT_SEND_SCI true
+ #define DEFAULT_ENCRYPT false
+ #define DEFAULT_ENCODING_SA 0
+@@ -1311,6 +1313,7 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
+ 		return PTR_ERR(rx_sa->key.tfm);
+ 	}
+ 
++	rx_sa->ssci = MACSEC_UNDEF_SSCI;
+ 	rx_sa->active = false;
+ 	rx_sa->next_pn = 1;
+ 	refcount_set(&rx_sa->refcnt, 1);
+@@ -1409,6 +1412,7 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
+ 		return PTR_ERR(tx_sa->key.tfm);
+ 	}
+ 
++	tx_sa->ssci = MACSEC_UNDEF_SSCI;
+ 	tx_sa->active = false;
+ 	refcount_set(&tx_sa->refcnt, 1);
+ 	spin_lock_init(&tx_sa->lock);
+@@ -1452,6 +1456,16 @@ static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
+ 	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
+ }
+ 
++static ssci_t nla_get_ssci(const struct nlattr *nla)
++{
++	return (__force ssci_t)nla_get_u32(nla);
++}
++
++static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
++{
++	return nla_put_u32(skb, attrtype, (__force u64)value);
++}
++
+ static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
+ 					     struct nlattr **attrs,
+ 					     struct nlattr **tb_sa,
+@@ -1567,11 +1581,14 @@ static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
+ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
+ 	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
+ 	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
+-	[MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
++	[MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
+ 	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
+ 				   .len = MACSEC_KEYID_LEN, },
+ 	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
+ 				 .len = MACSEC_MAX_KEY_LEN, },
++	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
++	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
++				  .len = MACSEC_SALT_LEN, },
+ };
+ 
+ static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
+@@ -1644,7 +1661,8 @@ static bool validate_add_rxsa(struct nlattr **attrs)
+ 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
+ 		return false;
+ 
+-	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0)
++	if (attrs[MACSEC_SA_ATTR_PN] &&
++	    *(u64 *)nla_data(attrs[MACSEC_SA_ATTR_PN]) == 0)
+ 		return false;
+ 
+ 	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
+@@ -1666,6 +1684,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 	struct macsec_rx_sc *rx_sc;
+ 	struct macsec_rx_sa *rx_sa;
+ 	unsigned char assoc_num;
++	int pn_len;
+ 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+ 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ 	int err;
+@@ -1698,6 +1717,29 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 		return -EINVAL;
+ 	}
+ 
++	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
++	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
++		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
++			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
++		rtnl_unlock();
++		return -EINVAL;
++	}
++
++	if (secy->xpn) {
++		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
++			rtnl_unlock();
++			return -EINVAL;
++		}
++
++		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
++			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
++				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
++				  MACSEC_SA_ATTR_SALT);
++			rtnl_unlock();
++			return -EINVAL;
++		}
++	}
++
+ 	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
+ 	if (rx_sa) {
+ 		rtnl_unlock();
+@@ -1720,7 +1762,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+ 		spin_lock_bh(&rx_sa->lock);
+-		rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
++		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&rx_sa->lock);
+ 	}
+ 
+@@ -1750,6 +1792,12 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 			goto cleanup;
+ 	}
+ 
++	if (secy->xpn) {
++		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
++		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
++			   MACSEC_SALT_LEN);
++	}
++
+ 	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
+ 	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
+ 
+@@ -1874,6 +1922,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	struct macsec_tx_sc *tx_sc;
+ 	struct macsec_tx_sa *tx_sa;
+ 	unsigned char assoc_num;
++	int pn_len;
+ 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+ 	bool was_operational;
+ 	int err;
+@@ -1906,6 +1955,29 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 		return -EINVAL;
+ 	}
+ 
++	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
++	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
++		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
++			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
++		rtnl_unlock();
++		return -EINVAL;
++	}
++
++	if (secy->xpn) {
++		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
++			rtnl_unlock();
++			return -EINVAL;
++		}
++
++		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
++			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
++				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
++				  MACSEC_SA_ATTR_SALT);
++			rtnl_unlock();
++			return -EINVAL;
++		}
++	}
++
+ 	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
+ 	if (tx_sa) {
+ 		rtnl_unlock();
+@@ -1927,7 +1999,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	spin_lock_bh(&tx_sa->lock);
+-	tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
++	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ 	spin_unlock_bh(&tx_sa->lock);
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
+@@ -1958,6 +2030,12 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 			goto cleanup;
+ 	}
+ 
++	if (secy->xpn) {
++		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
++		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
++			   MACSEC_SALT_LEN);
++	}
++
+ 	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
+ 	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
+ 
+@@ -2164,7 +2242,9 @@ static bool validate_upd_sa(struct nlattr **attrs)
+ {
+ 	if (!attrs[MACSEC_SA_ATTR_AN] ||
+ 	    attrs[MACSEC_SA_ATTR_KEY] ||
+-	    attrs[MACSEC_SA_ATTR_KEYID])
++	    attrs[MACSEC_SA_ATTR_KEYID] ||
++	    attrs[MACSEC_SA_ATTR_SSCI] ||
++	    attrs[MACSEC_SA_ATTR_SALT])
+ 		return false;
+ 
+ 	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
+@@ -2214,9 +2294,19 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
++		int pn_len;
++
++		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
++		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
++			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
++				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
++			rtnl_unlock();
++			return -EINVAL;
++		}
++
+ 		spin_lock_bh(&tx_sa->lock);
+ 		prev_pn = tx_sa->next_pn_halves;
+-		tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
++		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&tx_sa->lock);
+ 	}
+ 
+@@ -2300,9 +2390,19 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 	}
+ 
+ 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
++		int pn_len;
++
++		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
++		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
++			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
++				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
++			rtnl_unlock();
++			return -EINVAL;
++		}
++
+ 		spin_lock_bh(&rx_sa->lock);
+ 		prev_pn = rx_sa->next_pn_halves;
+-		rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
++		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
+ 		spin_unlock_bh(&rx_sa->lock);
+ 	}
+ 
+@@ -2749,10 +2849,10 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
+ 
+ 	switch (secy->key_len) {
+ 	case MACSEC_GCM_AES_128_SAK_LEN:
+-		csid = MACSEC_DEFAULT_CIPHER_ID;
++		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
+ 		break;
+ 	case MACSEC_GCM_AES_256_SAK_LEN:
+-		csid = MACSEC_CIPHER_ID_GCM_AES_256;
++		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
+ 		break;
+ 	default:
+ 		goto cancel;
+@@ -2843,6 +2943,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
+ 		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
+ 		struct nlattr *txsa_nest;
++		u64 pn;
++		int pn_len;
+ 
+ 		if (!tx_sa)
+ 			continue;
+@@ -2853,9 +2955,18 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 			goto nla_put_failure;
+ 		}
+ 
++		if (secy->xpn) {
++			pn = tx_sa->next_pn;
++			pn_len = MACSEC_XPN_PN_LEN;
++		} else {
++			pn = tx_sa->next_pn_halves.lower;
++			pn_len = MACSEC_DEFAULT_PN_LEN;
++		}
++
+ 		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
+-		    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn_halves.lower) ||
++		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
+ 		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
++		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
+ 		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
+ 			nla_nest_cancel(skb, txsa_nest);
+ 			nla_nest_cancel(skb, txsa_list);
+@@ -2928,6 +3039,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
+ 			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
+ 			struct nlattr *rxsa_nest;
++			u64 pn;
++			int pn_len;
+ 
+ 			if (!rx_sa)
+ 				continue;
+@@ -2957,9 +3070,18 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 			}
+ 			nla_nest_end(skb, attr);
+ 
++			if (secy->xpn) {
++				pn = rx_sa->next_pn;
++				pn_len = MACSEC_XPN_PN_LEN;
++			} else {
++				pn = rx_sa->next_pn_halves.lower;
++				pn_len = MACSEC_DEFAULT_PN_LEN;
++			}
++
+ 			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
+-			    nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn_halves.lower) ||
++			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
+ 			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
++			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
+ 			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
+ 				nla_nest_cancel(skb, rxsa_nest);
+ 				nla_nest_cancel(skb, rxsc_nest);
+@@ -3503,9 +3625,19 @@ static int macsec_changelink_common(struct net_device *dev,
+ 		case MACSEC_CIPHER_ID_GCM_AES_128:
+ 		case MACSEC_DEFAULT_CIPHER_ID:
+ 			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
++			secy->xpn = false;
+ 			break;
+ 		case MACSEC_CIPHER_ID_GCM_AES_256:
+ 			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
++			secy->xpn = false;
++			break;
++		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
++			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
++			secy->xpn = true;
++			break;
++		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
++			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
++			secy->xpn = true;
+ 			break;
+ 		default:
+ 			return -EINVAL;
+@@ -3695,6 +3827,7 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
+ 	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
+ 	secy->protect_frames = true;
+ 	secy->replay_protect = false;
++	secy->xpn = DEFAULT_XPN;
+ 
+ 	secy->sci = sci;
+ 	secy->tx_sc.active = true;
+@@ -3824,6 +3957,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
+ 	switch (csid) {
+ 	case MACSEC_CIPHER_ID_GCM_AES_128:
+ 	case MACSEC_CIPHER_ID_GCM_AES_256:
++	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
++	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
+ 	case MACSEC_DEFAULT_CIPHER_ID:
+ 		if (icv_len < MACSEC_MIN_ICV_LEN ||
+ 		    icv_len > MACSEC_STD_ICV_LEN)
+@@ -3897,10 +4032,10 @@ static int macsec_fill_info(struct sk_buff *skb,
+ 
+ 	switch (secy->key_len) {
+ 	case MACSEC_GCM_AES_128_SAK_LEN:
+-		csid = MACSEC_DEFAULT_CIPHER_ID;
++		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
+ 		break;
+ 	case MACSEC_GCM_AES_256_SAK_LEN:
+-		csid = MACSEC_CIPHER_ID_GCM_AES_256;
++		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
+ 		break;
+ 	default:
+ 		goto nla_put_failure;
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 43cd54e178770..2e4780dbf5c6a 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -11,6 +11,9 @@
+ #include <uapi/linux/if_link.h>
+ #include <uapi/linux/if_macsec.h>
+ 
++#define MACSEC_DEFAULT_PN_LEN 4
++#define MACSEC_XPN_PN_LEN 8
++
+ #define MACSEC_SALT_LEN 12
+ #define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+ 
+diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
+index 1d63c43c38cca..3af2aa069a367 100644
+--- a/include/uapi/linux/if_macsec.h
++++ b/include/uapi/linux/if_macsec.h
+@@ -22,9 +22,11 @@
+ 
+ #define MACSEC_KEYID_LEN 16
+ 
+-/* cipher IDs as per IEEE802.1AEbn-2011 */
++/* cipher IDs as per IEEE802.1AE-2018 (Table 14-1) */
+ #define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL
+ #define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL
++#define MACSEC_CIPHER_ID_GCM_AES_XPN_128 0x0080C20001000003ULL
++#define MACSEC_CIPHER_ID_GCM_AES_XPN_256 0x0080C20001000004ULL
+ 
+ /* deprecated cipher ID for GCM-AES-128 */
+ #define MACSEC_DEFAULT_CIPHER_ID     0x0080020001000001ULL
+@@ -88,11 +90,13 @@ enum macsec_sa_attrs {
+ 	MACSEC_SA_ATTR_UNSPEC,
+ 	MACSEC_SA_ATTR_AN,     /* config/dump, u8 0..3 */
+ 	MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
+-	MACSEC_SA_ATTR_PN,     /* config/dump, u32 */
++	MACSEC_SA_ATTR_PN,     /* config/dump, u32/u64 (u64 if XPN) */
+ 	MACSEC_SA_ATTR_KEY,    /* config, data */
+ 	MACSEC_SA_ATTR_KEYID,  /* config/dump, 128-bit */
+ 	MACSEC_SA_ATTR_STATS,  /* dump, nested, macsec_sa_stats_attr */
+ 	MACSEC_SA_ATTR_PAD,
++	MACSEC_SA_ATTR_SSCI,   /* config/dump, u32 - XPN only */
++	MACSEC_SA_ATTR_SALT,   /* config, 96-bit - XPN only */
+ 	__MACSEC_SA_ATTR_END,
+ 	NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,
+ 	MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1764-v5.18-net-macsec-restrict-to-ethernet-devices.patch b/target/linux/mediatek/patches-5.4/999-1764-v5.18-net-macsec-restrict-to-ethernet-devices.patch
new file mode 100644
index 0000000..bae5efa
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1764-v5.18-net-macsec-restrict-to-ethernet-devices.patch
@@ -0,0 +1,41 @@
+From b06d072ccc4b1acd0147b17914b7ad1caa1818bb Mon Sep 17 00:00:00 2001
+From: Willem de Bruijn <willemb@google.com>
+Date: Sun, 22 Mar 2020 13:51:13 -0400
+Subject: macsec: restrict to ethernet devices
+
+Only attach macsec to ethernet devices.
+
+Syzbot was able to trigger a KMSAN warning in macsec_handle_frame
+by attaching to a phonet device.
+
+Macvlan has a similar check in macvlan_port_create.
+
+v1->v2
+  - fix commit message typo
+
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -20,6 +20,7 @@
+ #include <net/macsec.h>
+ #include <linux/phy.h>
+ #include <linux/byteorder/generic.h>
++#include <linux/if_arp.h>
+ 
+ #include <uapi/linux/if_macsec.h>
+ 
+@@ -3859,6 +3860,8 @@ static int macsec_newlink(struct net *ne
+ 	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+ 	if (!real_dev)
+ 		return -ENODEV;
++	if (real_dev->type != ARPHRD_ETHER)
++		return -EINVAL;
+ 
+ 	dev->priv_flags |= IFF_MACSEC;
+ 
diff --git a/target/linux/mediatek/patches-5.4/999-1765-01-v5.18-net-introduce-the-MACSEC-netdev-feature.patch b/target/linux/mediatek/patches-5.4/999-1765-01-v5.18-net-introduce-the-MACSEC-netdev-feature.patch
new file mode 100644
index 0000000..fa99dfb
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1765-01-v5.18-net-introduce-the-MACSEC-netdev-feature.patch
@@ -0,0 +1,39 @@
+From 5908220b2b3d6918f88cd645a39e1dcb84d1c5d9 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Wed, 25 Mar 2020 15:52:30 +0300
+Subject: net: introduce the MACSEC netdev feature
+
+This patch introduce a new netdev feature, which will be used by drivers
+to state they can perform MACsec transformations in hardware.
+
+The patchset was gathered by Mark, macsec functionality itself
+was implemented by Dmitry, Mark and Pavel Belous.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/linux/netdev_features.h | 3 +++
+ net/ethtool/common.c            | 1 +
+ 2 files changed, 4 insertions(+)
+
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -81,6 +81,8 @@ enum {
+ 	NETIF_F_GRO_HW_BIT,		/* Hardware Generic receive offload */
+ 	NETIF_F_HW_TLS_RECORD_BIT,	/* Offload TLS record */
+ 
++	NETIF_F_HW_MACSEC_BIT,		/* Offload MACsec operations */
++
+ 	/*
+ 	 * Add your fresh new feature above and remember to update
+ 	 * netdev_features_strings[] in net/ethtool/common.c and maybe
+@@ -150,6 +152,7 @@ enum {
+ #define NETIF_F_GSO_UDP_L4	__NETIF_F(GSO_UDP_L4)
+ #define NETIF_F_HW_TLS_TX	__NETIF_F(HW_TLS_TX)
+ #define NETIF_F_HW_TLS_RX	__NETIF_F(HW_TLS_RX)
++#define NETIF_F_HW_MACSEC	__NETIF_F(HW_MACSEC)
+ 
+ /* Finds the next feature with the highest number of the range of start till 0.
+  */
diff --git a/target/linux/mediatek/patches-5.4/999-1766-02-v5.18-net-add-a-reference-to-MACsec-ops-in-net_device.patch b/target/linux/mediatek/patches-5.4/999-1766-02-v5.18-net-add-a-reference-to-MACsec-ops-in-net_device.patch
new file mode 100644
index 0000000..c3372fd
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1766-02-v5.18-net-add-a-reference-to-MACsec-ops-in-net_device.patch
@@ -0,0 +1,49 @@
+From 30e9bb8472f4454d0544020574bb03d96ffa0e52 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Wed, 25 Mar 2020 15:52:31 +0300
+Subject: net: add a reference to MACsec ops in net_device
+
+This patch adds a reference to MACsec ops to the net_device structure,
+allowing net device drivers to implement offloading operations for
+MACsec.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/linux/netdevice.h | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -53,6 +53,8 @@ struct netpoll_info;
+ struct device;
+ struct phy_device;
+ struct dsa_port;
++struct macsec_context;
++struct macsec_ops;
+ 
+ struct sfp_bus;
+ /* 802.11 specific */
+@@ -1805,6 +1807,8 @@ enum netdev_ml_priv_type {
+  *
+  *	@threaded:	napi threaded mode is enabled
+  *
++ *	@macsec_ops:    MACsec offloading ops
++ *
+  *	FIXME: cleanup struct net_device such that network protocol info
+  *	moves out.
+  */
+@@ -2109,6 +2113,11 @@ struct net_device {
+ 	bool			proto_down;
+ 	unsigned		wol_enabled:1;
+ 	unsigned		threaded:1;
++
++#if IS_ENABLED(CONFIG_MACSEC)
++	/* MACsec management functions */
++	const struct macsec_ops *macsec_ops;
++#endif
+ };
+ #define to_net_dev(d) container_of(d, struct net_device, dev)
+ 
diff --git a/target/linux/mediatek/patches-5.4/999-1767-03-v5.18-net-macsec-allow-to-reference-a-netdev-from-a-MACsec-context.patch b/target/linux/mediatek/patches-5.4/999-1767-03-v5.18-net-macsec-allow-to-reference-a-netdev-from-a-MACsec-context.patch
new file mode 100644
index 0000000..b3ef8df
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1767-03-v5.18-net-macsec-allow-to-reference-a-netdev-from-a-MACsec-context.patch
@@ -0,0 +1,35 @@
+From 8fa9137180b2fd8482b671f7e0bd8cf7538cbf59 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Wed, 25 Mar 2020 15:52:32 +0300
+Subject: net: macsec: allow to reference a netdev from a MACsec context
+
+This patch allows to reference a net_device from a MACsec context. This
+is needed to allow implementing MACsec operations in net device drivers.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ include/net/macsec.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 2e4780dbf5c6a..71de2c863df70 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -220,7 +220,10 @@ struct macsec_secy {
+  * struct macsec_context - MACsec context for hardware offloading
+  */
+ struct macsec_context {
+-	struct phy_device *phydev;
++	union {
++		struct net_device *netdev;
++		struct phy_device *phydev;
++	};
+ 	enum macsec_offload offload;
+ 
+ 	struct macsec_secy *secy;
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1768-04-v5.18-net-macsec-add-support-for-offloading-to-the-MAC.patch b/target/linux/mediatek/patches-5.4/999-1768-04-v5.18-net-macsec-add-support-for-offloading-to-the-MAC.patch
new file mode 100644
index 0000000..6b5dd5d
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1768-04-v5.18-net-macsec-add-support-for-offloading-to-the-MAC.patch
@@ -0,0 +1,77 @@
+From 21114b7feec29e4425a3ac48a037569c016a46c8 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Wed, 25 Mar 2020 15:52:33 +0300
+Subject: net: macsec: add support for offloading to the MAC
+
+This patch adds a new MACsec offloading option, MACSEC_OFFLOAD_MAC,
+allowing a user to select a MAC as a provider for MACsec offloading
+operations.
+
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c               | 13 +++++++++++--
+ include/uapi/linux/if_link.h       |  1 +
+ tools/include/uapi/linux/if_link.h |  1 +
+ 3 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -339,7 +339,8 @@ static void macsec_set_shortlen(struct m
+ /* Checks if a MACsec interface is being offloaded to an hardware engine */
+ static bool macsec_is_offloaded(struct macsec_dev *macsec)
+ {
+-	if (macsec->offload == MACSEC_OFFLOAD_PHY)
++	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
++	    macsec->offload == MACSEC_OFFLOAD_PHY)
+ 		return true;
+ 
+ 	return false;
+@@ -355,6 +356,9 @@ static bool macsec_check_offload(enum ma
+ 	if (offload == MACSEC_OFFLOAD_PHY)
+ 		return macsec->real_dev->phydev &&
+ 		       macsec->real_dev->phydev->macsec_ops;
++	else if (offload == MACSEC_OFFLOAD_MAC)
++		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
++		       macsec->real_dev->macsec_ops;
+ 
+ 	return false;
+ }
+@@ -369,9 +373,14 @@ static const struct macsec_ops *__macsec
+ 
+ 		if (offload == MACSEC_OFFLOAD_PHY)
+ 			ctx->phydev = macsec->real_dev->phydev;
++		else if (offload == MACSEC_OFFLOAD_MAC)
++			ctx->netdev = macsec->real_dev;
+ 	}
+ 
+-	return macsec->real_dev->phydev->macsec_ops;
++	if (offload == MACSEC_OFFLOAD_PHY)
++		return macsec->real_dev->phydev->macsec_ops;
++	else
++		return macsec->real_dev->macsec_ops;
+ }
+ 
+ /* Returns a pointer to the MACsec ops struct if any and updates the MACsec
+--- a/include/uapi/linux/if_link.h
++++ b/include/uapi/linux/if_link.h
+@@ -486,6 +486,7 @@ enum macsec_validation_type {
+ enum macsec_offload {
+ 	MACSEC_OFFLOAD_OFF = 0,
+ 	MACSEC_OFFLOAD_PHY = 1,
++	MACSEC_OFFLOAD_MAC = 2,
+ 	__MACSEC_OFFLOAD_END,
+ 	MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
+ };
+--- a/tools/include/uapi/linux/if_link.h
++++ b/tools/include/uapi/linux/if_link.h
+@@ -486,6 +486,7 @@ enum macsec_validation_type {
+ enum macsec_offload {
+ 	MACSEC_OFFLOAD_OFF = 0,
+ 	MACSEC_OFFLOAD_PHY = 1,
++	MACSEC_OFFLOAD_MAC = 2,
+ 	__MACSEC_OFFLOAD_END,
+ 	MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
+ };
diff --git a/target/linux/mediatek/patches-5.4/999-1769-05-v5.18-net-macsec-init-secy-pointer-in-macsec_context.patch b/target/linux/mediatek/patches-5.4/999-1769-05-v5.18-net-macsec-init-secy-pointer-in-macsec_context.patch
new file mode 100644
index 0000000..017e415
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1769-05-v5.18-net-macsec-init-secy-pointer-in-macsec_context.patch
@@ -0,0 +1,131 @@
+From 182879f89b858fede98136ea3ad45fe9c7178387 Mon Sep 17 00:00:00 2001
+From: Dmitry Bogdanov <dbogdanov@marvell.com>
+Date: Wed, 25 Mar 2020 15:52:34 +0300
+Subject: net: macsec: init secy pointer in macsec_context
+
+This patch adds secy pointer initialization in the macsec_context.
+It will be used by MAC drivers in offloading operations.
+
+Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index d29c072e19af6..093e81d605ec7 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1794,6 +1794,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.rx_sa = rx_sa;
++		ctx.secy = secy;
+ 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ 		       MACSEC_KEYID_LEN);
+ 
+@@ -1841,6 +1842,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 	struct nlattr **attrs = info->attrs;
+ 	struct macsec_rx_sc *rx_sc;
+ 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
++	struct macsec_secy *secy;
+ 	bool was_active;
+ 	int ret;
+ 
+@@ -1860,6 +1862,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 		return PTR_ERR(dev);
+ 	}
+ 
++	secy = &macsec_priv(dev)->secy;
+ 	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
+ 
+ 	rx_sc = create_rx_sc(dev, sci);
+@@ -1883,6 +1886,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 		}
+ 
+ 		ctx.rx_sc = rx_sc;
++		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
+ 		if (ret)
+@@ -2032,6 +2036,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.tx_sa = tx_sa;
++		ctx.secy = secy;
+ 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ 		       MACSEC_KEYID_LEN);
+ 
+@@ -2107,6 +2112,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.rx_sa = rx_sa;
++		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
+ 		if (ret)
+@@ -2172,6 +2178,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 		}
+ 
+ 		ctx.rx_sc = rx_sc;
++		ctx.secy = secy;
+ 		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
+ 		if (ret)
+ 			goto cleanup;
+@@ -2230,6 +2237,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.tx_sa = tx_sa;
++		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
+ 		if (ret)
+@@ -2341,6 +2349,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.tx_sa = tx_sa;
++		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
+ 		if (ret)
+@@ -2433,6 +2442,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		ctx.sa.assoc_num = assoc_num;
+ 		ctx.sa.rx_sa = rx_sa;
++		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
+ 		if (ret)
+@@ -2503,6 +2513,7 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
+ 		}
+ 
+ 		ctx.rx_sc = rx_sc;
++		ctx.secy = secy;
+ 
+ 		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
+ 		if (ret)
+@@ -3370,6 +3381,7 @@ static int macsec_dev_open(struct net_device *dev)
+ 			goto clear_allmulti;
+ 		}
+ 
++		ctx.secy = &macsec->secy;
+ 		err = macsec_offload(ops->mdo_dev_open, &ctx);
+ 		if (err)
+ 			goto clear_allmulti;
+@@ -3401,8 +3413,10 @@ static int macsec_dev_stop(struct net_device *dev)
+ 		struct macsec_context ctx;
+ 
+ 		ops = macsec_get_ops(macsec, &ctx);
+-		if (ops)
++		if (ops) {
++			ctx.secy = &macsec->secy;
+ 			macsec_offload(ops->mdo_dev_stop, &ctx);
++		}
+ 	}
+ 
+ 	dev_mc_unsync(real_dev, dev);
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1770-06-v5.18-net-macsec-allow-multiple-macsec-devices-with-offload.patch b/target/linux/mediatek/patches-5.4/999-1770-06-v5.18-net-macsec-allow-multiple-macsec-devices-with-offload.patch
new file mode 100644
index 0000000..4ef5870
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1770-06-v5.18-net-macsec-allow-multiple-macsec-devices-with-offload.patch
@@ -0,0 +1,65 @@
+From a249f8050624f92f844605274de3367e2c8ac706 Mon Sep 17 00:00:00 2001
+From: Dmitry Bogdanov <dbogdanov@marvell.com>
+Date: Wed, 25 Mar 2020 15:52:35 +0300
+Subject: net: macsec: allow multiple macsec devices with offload
+
+Offload engine can set up several SecYs. Each macsec interface shall have
+its own mac address. It will filter traffic by dest mac address.
+
+Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 25 +------------------------
+ 1 file changed, 1 insertion(+), 24 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 093e81d605ec7..146a7881a20ac 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2553,11 +2553,10 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ 	enum macsec_offload offload, prev_offload;
+ 	int (*func)(struct macsec_context *ctx);
+ 	struct nlattr **attrs = info->attrs;
+-	struct net_device *dev, *loop_dev;
++	struct net_device *dev;
+ 	const struct macsec_ops *ops;
+ 	struct macsec_context ctx;
+ 	struct macsec_dev *macsec;
+-	struct net *loop_net;
+ 	int ret;
+ 
+ 	if (!attrs[MACSEC_ATTR_IFINDEX])
+@@ -2585,28 +2584,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ 	    !macsec_check_offload(offload, macsec))
+ 		return -EOPNOTSUPP;
+ 
+-	if (offload == MACSEC_OFFLOAD_OFF)
+-		goto skip_limitation;
+-
+-	/* Check the physical interface isn't offloading another interface
+-	 * first.
+-	 */
+-	for_each_net(loop_net) {
+-		for_each_netdev(loop_net, loop_dev) {
+-			struct macsec_dev *priv;
+-
+-			if (!netif_is_macsec(loop_dev))
+-				continue;
+-
+-			priv = macsec_priv(loop_dev);
+-
+-			if (priv->real_dev == macsec->real_dev &&
+-			    priv->offload != MACSEC_OFFLOAD_OFF)
+-				return -EBUSY;
+-		}
+-	}
+-
+-skip_limitation:
+ 	/* Check if the net device is busy. */
+ 	if (netif_running(dev))
+ 		return -EBUSY;
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1771-07-v5.18-net-macsec-support-multicast-broadcast-when-offloading.patch b/target/linux/mediatek/patches-5.4/999-1771-07-v5.18-net-macsec-support-multicast-broadcast-when-offloading.patch
new file mode 100644
index 0000000..2f17478
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1771-07-v5.18-net-macsec-support-multicast-broadcast-when-offloading.patch
@@ -0,0 +1,105 @@
+From f428011b90ec0de7429886f753b7c3293392761c Mon Sep 17 00:00:00 2001
+From: Mark Starovoytov <mstarovoitov@marvell.com>
+Date: Wed, 25 Mar 2020 15:52:36 +0300
+Subject: net: macsec: support multicast/broadcast when offloading
+
+The idea is simple. If the frame is an exact match for the controlled port
+(based on DA comparison), then we simply divert this skb to matching port.
+
+Multicast/broadcast messages are delivered to all ports.
+
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 51 ++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 38 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 146a7881a20ac..c7ad7c6f1d1ec 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1006,22 +1006,53 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ {
+ 	/* Deliver to the uncontrolled port by default */
+ 	enum rx_handler_result ret = RX_HANDLER_PASS;
++	struct ethhdr *hdr = eth_hdr(skb);
+ 	struct macsec_rxh_data *rxd;
+ 	struct macsec_dev *macsec;
+ 
+ 	rcu_read_lock();
+ 	rxd = macsec_data_rcu(skb->dev);
+ 
+-	/* 10.6 If the management control validateFrames is not
+-	 * Strict, frames without a SecTAG are received, counted, and
+-	 * delivered to the Controlled Port
+-	 */
+ 	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
+ 		struct sk_buff *nskb;
+ 		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
++		struct net_device *ndev = macsec->secy.netdev;
+ 
+-		if (!macsec_is_offloaded(macsec) &&
+-		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
++		/* If h/w offloading is enabled, HW decodes frames and strips
++		 * the SecTAG, so we have to deduce which port to deliver to.
++		 */
++		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
++			if (ether_addr_equal_64bits(hdr->h_dest,
++						    ndev->dev_addr)) {
++				/* exact match, divert skb to this port */
++				skb->dev = ndev;
++				skb->pkt_type = PACKET_HOST;
++				ret = RX_HANDLER_ANOTHER;
++				goto out;
++			} else if (is_multicast_ether_addr_64bits(
++					   hdr->h_dest)) {
++				/* multicast frame, deliver on this port too */
++				nskb = skb_clone(skb, GFP_ATOMIC);
++				if (!nskb)
++					break;
++
++				nskb->dev = ndev;
++				if (ether_addr_equal_64bits(hdr->h_dest,
++							    ndev->broadcast))
++					nskb->pkt_type = PACKET_BROADCAST;
++				else
++					nskb->pkt_type = PACKET_MULTICAST;
++
++				netif_rx(nskb);
++			}
++			continue;
++		}
++
++		/* 10.6 If the management control validateFrames is not
++		 * Strict, frames without a SecTAG are received, counted, and
++		 * delivered to the Controlled Port
++		 */
++		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+ 			u64_stats_update_begin(&secy_stats->syncp);
+ 			secy_stats->stats.InPktsNoTag++;
+ 			u64_stats_update_end(&secy_stats->syncp);
+@@ -1033,19 +1064,13 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
+ 		if (!nskb)
+ 			break;
+ 
+-		nskb->dev = macsec->secy.netdev;
++		nskb->dev = ndev;
+ 
+ 		if (netif_rx(nskb) == NET_RX_SUCCESS) {
+ 			u64_stats_update_begin(&secy_stats->syncp);
+ 			secy_stats->stats.InPktsUntagged++;
+ 			u64_stats_update_end(&secy_stats->syncp);
+ 		}
+-
+-		if (netif_running(macsec->secy.netdev) &&
+-		    macsec_is_offloaded(macsec)) {
+-			ret = RX_HANDLER_EXACT;
+-			goto out;
+-		}
+ 	}
+ 
+ out:
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1772-08-v5.18-net-macsec-add-support-for-getting-offloaded-stats.patch b/target/linux/mediatek/patches-5.4/999-1772-08-v5.18-net-macsec-add-support-for-getting-offloaded-stats.patch
new file mode 100644
index 0000000..5848eeb
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1772-08-v5.18-net-macsec-add-support-for-getting-offloaded-stats.patch
@@ -0,0 +1,580 @@
+From b62c3624500a7e1cc081e75973299c1f7901a438 Mon Sep 17 00:00:00 2001
+From: Dmitry Bogdanov <dbogdanov@marvell.com>
+Date: Wed, 25 Mar 2020 15:52:37 +0300
+Subject: net: macsec: add support for getting offloaded stats
+
+When HW offloading is enabled, offloaded stats should be used, because
+s/w stats are wrong and out of sync with the HW in this case.
+
+Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 321 ++++++++++++++++++++++++++++++++++-----------------
+ include/net/macsec.h |  24 ++++
+ 2 files changed, 237 insertions(+), 108 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index c7ad7c6f1d1ec..b00a078d13ffe 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -88,17 +88,6 @@ struct gcm_iv {
+ 	__be32 pn;
+ };
+ 
+-struct macsec_dev_stats {
+-	__u64 OutPktsUntagged;
+-	__u64 InPktsUntagged;
+-	__u64 OutPktsTooLong;
+-	__u64 InPktsNoTag;
+-	__u64 InPktsBadTag;
+-	__u64 InPktsUnknownSCI;
+-	__u64 InPktsNoSCI;
+-	__u64 InPktsOverrun;
+-};
+-
+ #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
+ 
+ struct pcpu_secy_stats {
+@@ -2653,207 +2642,309 @@ rollback:
+ 	return ret;
+ }
+ 
+-static int copy_tx_sa_stats(struct sk_buff *skb,
+-			    struct macsec_tx_sa_stats __percpu *pstats)
++static void get_tx_sa_stats(struct net_device *dev, int an,
++			    struct macsec_tx_sa *tx_sa,
++			    struct macsec_tx_sa_stats *sum)
+ {
+-	struct macsec_tx_sa_stats sum = {0, };
++	struct macsec_dev *macsec = macsec_priv(dev);
+ 	int cpu;
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(macsec, &ctx);
++		if (ops) {
++			ctx.sa.assoc_num = an;
++			ctx.sa.tx_sa = tx_sa;
++			ctx.stats.tx_sa_stats = sum;
++			ctx.secy = &macsec_priv(dev)->secy;
++			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
++		}
++		return;
++	}
++
+ 	for_each_possible_cpu(cpu) {
+-		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
++		const struct macsec_tx_sa_stats *stats =
++			per_cpu_ptr(tx_sa->stats, cpu);
+ 
+-		sum.OutPktsProtected += stats->OutPktsProtected;
+-		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
++		sum->OutPktsProtected += stats->OutPktsProtected;
++		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
+ 	}
++}
+ 
+-	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
+-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
++static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
++{
++	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
++			sum->OutPktsProtected) ||
++	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
++			sum->OutPktsEncrypted))
+ 		return -EMSGSIZE;
+ 
+ 	return 0;
+ }
+ 
+-static noinline_for_stack int
+-copy_rx_sa_stats(struct sk_buff *skb,
+-		 struct macsec_rx_sa_stats __percpu *pstats)
++static void get_rx_sa_stats(struct net_device *dev,
++			    struct macsec_rx_sc *rx_sc, int an,
++			    struct macsec_rx_sa *rx_sa,
++			    struct macsec_rx_sa_stats *sum)
+ {
+-	struct macsec_rx_sa_stats sum = {0, };
++	struct macsec_dev *macsec = macsec_priv(dev);
+ 	int cpu;
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(macsec, &ctx);
++		if (ops) {
++			ctx.sa.assoc_num = an;
++			ctx.sa.rx_sa = rx_sa;
++			ctx.stats.rx_sa_stats = sum;
++			ctx.secy = &macsec_priv(dev)->secy;
++			ctx.rx_sc = rx_sc;
++			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
++		}
++		return;
++	}
++
+ 	for_each_possible_cpu(cpu) {
+-		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
++		const struct macsec_rx_sa_stats *stats =
++			per_cpu_ptr(rx_sa->stats, cpu);
+ 
+-		sum.InPktsOK         += stats->InPktsOK;
+-		sum.InPktsInvalid    += stats->InPktsInvalid;
+-		sum.InPktsNotValid   += stats->InPktsNotValid;
+-		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
+-		sum.InPktsUnusedSA   += stats->InPktsUnusedSA;
++		sum->InPktsOK         += stats->InPktsOK;
++		sum->InPktsInvalid    += stats->InPktsInvalid;
++		sum->InPktsNotValid   += stats->InPktsNotValid;
++		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
++		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
+ 	}
++}
+ 
+-	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
+-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
+-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
+-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
+-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
++static int copy_rx_sa_stats(struct sk_buff *skb,
++			    struct macsec_rx_sa_stats *sum)
++{
++	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
++	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
++			sum->InPktsInvalid) ||
++	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
++			sum->InPktsNotValid) ||
++	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
++			sum->InPktsNotUsingSA) ||
++	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
++			sum->InPktsUnusedSA))
+ 		return -EMSGSIZE;
+ 
+ 	return 0;
+ }
+ 
+-static noinline_for_stack int
+-copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
++static void get_rx_sc_stats(struct net_device *dev,
++			    struct macsec_rx_sc *rx_sc,
++			    struct macsec_rx_sc_stats *sum)
+ {
+-	struct macsec_rx_sc_stats sum = {0, };
++	struct macsec_dev *macsec = macsec_priv(dev);
+ 	int cpu;
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(macsec, &ctx);
++		if (ops) {
++			ctx.stats.rx_sc_stats = sum;
++			ctx.secy = &macsec_priv(dev)->secy;
++			ctx.rx_sc = rx_sc;
++			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
++		}
++		return;
++	}
++
+ 	for_each_possible_cpu(cpu) {
+ 		const struct pcpu_rx_sc_stats *stats;
+ 		struct macsec_rx_sc_stats tmp;
+ 		unsigned int start;
+ 
+-		stats = per_cpu_ptr(pstats, cpu);
++		stats = per_cpu_ptr(rx_sc->stats, cpu);
+ 		do {
+ 			start = u64_stats_fetch_begin_irq(&stats->syncp);
+ 			memcpy(&tmp, &stats->stats, sizeof(tmp));
+ 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ 
+-		sum.InOctetsValidated += tmp.InOctetsValidated;
+-		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
+-		sum.InPktsUnchecked   += tmp.InPktsUnchecked;
+-		sum.InPktsDelayed     += tmp.InPktsDelayed;
+-		sum.InPktsOK          += tmp.InPktsOK;
+-		sum.InPktsInvalid     += tmp.InPktsInvalid;
+-		sum.InPktsLate        += tmp.InPktsLate;
+-		sum.InPktsNotValid    += tmp.InPktsNotValid;
+-		sum.InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
+-		sum.InPktsUnusedSA    += tmp.InPktsUnusedSA;
++		sum->InOctetsValidated += tmp.InOctetsValidated;
++		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
++		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
++		sum->InPktsDelayed     += tmp.InPktsDelayed;
++		sum->InPktsOK          += tmp.InPktsOK;
++		sum->InPktsInvalid     += tmp.InPktsInvalid;
++		sum->InPktsLate        += tmp.InPktsLate;
++		sum->InPktsNotValid    += tmp.InPktsNotValid;
++		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
++		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
+ 	}
++}
+ 
++static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
++{
+ 	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
+-			      sum.InOctetsValidated,
++			      sum->InOctetsValidated,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
+-			      sum.InOctetsDecrypted,
++			      sum->InOctetsDecrypted,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
+-			      sum.InPktsUnchecked,
++			      sum->InPktsUnchecked,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
+-			      sum.InPktsDelayed,
++			      sum->InPktsDelayed,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
+-			      sum.InPktsOK,
++			      sum->InPktsOK,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
+-			      sum.InPktsInvalid,
++			      sum->InPktsInvalid,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
+-			      sum.InPktsLate,
++			      sum->InPktsLate,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
+-			      sum.InPktsNotValid,
++			      sum->InPktsNotValid,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+-			      sum.InPktsNotUsingSA,
++			      sum->InPktsNotUsingSA,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+-			      sum.InPktsUnusedSA,
++			      sum->InPktsUnusedSA,
+ 			      MACSEC_RXSC_STATS_ATTR_PAD))
+ 		return -EMSGSIZE;
+ 
+ 	return 0;
+ }
+ 
+-static noinline_for_stack int
+-copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
++static void get_tx_sc_stats(struct net_device *dev,
++			    struct macsec_tx_sc_stats *sum)
+ {
+-	struct macsec_tx_sc_stats sum = {0, };
++	struct macsec_dev *macsec = macsec_priv(dev);
+ 	int cpu;
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(macsec, &ctx);
++		if (ops) {
++			ctx.stats.tx_sc_stats = sum;
++			ctx.secy = &macsec_priv(dev)->secy;
++			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
++		}
++		return;
++	}
++
+ 	for_each_possible_cpu(cpu) {
+ 		const struct pcpu_tx_sc_stats *stats;
+ 		struct macsec_tx_sc_stats tmp;
+ 		unsigned int start;
+ 
+-		stats = per_cpu_ptr(pstats, cpu);
++		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
+ 		do {
+ 			start = u64_stats_fetch_begin_irq(&stats->syncp);
+ 			memcpy(&tmp, &stats->stats, sizeof(tmp));
+ 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ 
+-		sum.OutPktsProtected   += tmp.OutPktsProtected;
+-		sum.OutPktsEncrypted   += tmp.OutPktsEncrypted;
+-		sum.OutOctetsProtected += tmp.OutOctetsProtected;
+-		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
++		sum->OutPktsProtected   += tmp.OutPktsProtected;
++		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
++		sum->OutOctetsProtected += tmp.OutOctetsProtected;
++		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
+ 	}
++}
+ 
++static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
++{
+ 	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
+-			      sum.OutPktsProtected,
++			      sum->OutPktsProtected,
+ 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+-			      sum.OutPktsEncrypted,
++			      sum->OutPktsEncrypted,
+ 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
+-			      sum.OutOctetsProtected,
++			      sum->OutOctetsProtected,
+ 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+-			      sum.OutOctetsEncrypted,
++			      sum->OutOctetsEncrypted,
+ 			      MACSEC_TXSC_STATS_ATTR_PAD))
+ 		return -EMSGSIZE;
+ 
+ 	return 0;
+ }
+ 
+-static noinline_for_stack int
+-copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
++static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
+ {
+-	struct macsec_dev_stats sum = {0, };
++	struct macsec_dev *macsec = macsec_priv(dev);
+ 	int cpu;
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(macsec, &ctx);
++		if (ops) {
++			ctx.stats.dev_stats = sum;
++			ctx.secy = &macsec_priv(dev)->secy;
++			macsec_offload(ops->mdo_get_dev_stats, &ctx);
++		}
++		return;
++	}
++
+ 	for_each_possible_cpu(cpu) {
+ 		const struct pcpu_secy_stats *stats;
+ 		struct macsec_dev_stats tmp;
+ 		unsigned int start;
+ 
+-		stats = per_cpu_ptr(pstats, cpu);
++		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
+ 		do {
+ 			start = u64_stats_fetch_begin_irq(&stats->syncp);
+ 			memcpy(&tmp, &stats->stats, sizeof(tmp));
+ 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ 
+-		sum.OutPktsUntagged  += tmp.OutPktsUntagged;
+-		sum.InPktsUntagged   += tmp.InPktsUntagged;
+-		sum.OutPktsTooLong   += tmp.OutPktsTooLong;
+-		sum.InPktsNoTag      += tmp.InPktsNoTag;
+-		sum.InPktsBadTag     += tmp.InPktsBadTag;
+-		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
+-		sum.InPktsNoSCI      += tmp.InPktsNoSCI;
+-		sum.InPktsOverrun    += tmp.InPktsOverrun;
++		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
++		sum->InPktsUntagged   += tmp.InPktsUntagged;
++		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
++		sum->InPktsNoTag      += tmp.InPktsNoTag;
++		sum->InPktsBadTag     += tmp.InPktsBadTag;
++		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
++		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
++		sum->InPktsOverrun    += tmp.InPktsOverrun;
+ 	}
++}
+ 
++static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
++{
+ 	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
+-			      sum.OutPktsUntagged,
++			      sum->OutPktsUntagged,
+ 			      MACSEC_SECY_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
+-			      sum.InPktsUntagged,
++			      sum->InPktsUntagged,
+ 			      MACSEC_SECY_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
+-			      sum.OutPktsTooLong,
++			      sum->OutPktsTooLong,
+ 			      MACSEC_SECY_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
+-			      sum.InPktsNoTag,
++			      sum->InPktsNoTag,
+ 			      MACSEC_SECY_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
+-			      sum.InPktsBadTag,
++			      sum->InPktsBadTag,
+ 			      MACSEC_SECY_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
+-			      sum.InPktsUnknownSCI,
++			      sum->InPktsUnknownSCI,
+ 			      MACSEC_SECY_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
+-			      sum.InPktsNoSCI,
++			      sum->InPktsNoSCI,
+ 			      MACSEC_SECY_STATS_ATTR_PAD) ||
+ 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+-			      sum.InPktsOverrun,
++			      sum->InPktsOverrun,
+ 			      MACSEC_SECY_STATS_ATTR_PAD))
+ 		return -EMSGSIZE;
+ 
+@@ -2914,7 +3005,12 @@ static noinline_for_stack int
+ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 	  struct sk_buff *skb, struct netlink_callback *cb)
+ {
++	struct macsec_tx_sc_stats tx_sc_stats = {0, };
++	struct macsec_tx_sa_stats tx_sa_stats = {0, };
++	struct macsec_rx_sc_stats rx_sc_stats = {0, };
++	struct macsec_rx_sa_stats rx_sa_stats = {0, };
+ 	struct macsec_dev *macsec = netdev_priv(dev);
++	struct macsec_dev_stats dev_stats = {0, };
+ 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ 	struct nlattr *txsa_list, *rxsc_list;
+ 	struct macsec_rx_sc *rx_sc;
+@@ -2945,7 +3041,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
+ 	if (!attr)
+ 		goto nla_put_failure;
+-	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
++
++	get_tx_sc_stats(dev, &tx_sc_stats);
++	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
+ 		nla_nest_cancel(skb, attr);
+ 		goto nla_put_failure;
+ 	}
+@@ -2954,7 +3052,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
+ 	if (!attr)
+ 		goto nla_put_failure;
+-	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
++	get_secy_stats(dev, &dev_stats);
++	if (copy_secy_stats(skb, &dev_stats)) {
+ 		nla_nest_cancel(skb, attr);
+ 		goto nla_put_failure;
+ 	}
+@@ -2978,6 +3077,22 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 			goto nla_put_failure;
+ 		}
+ 
++		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
++		if (!attr) {
++			nla_nest_cancel(skb, txsa_nest);
++			nla_nest_cancel(skb, txsa_list);
++			goto nla_put_failure;
++		}
++		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
++		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
++		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
++			nla_nest_cancel(skb, attr);
++			nla_nest_cancel(skb, txsa_nest);
++			nla_nest_cancel(skb, txsa_list);
++			goto nla_put_failure;
++		}
++		nla_nest_end(skb, attr);
++
+ 		if (secy->xpn) {
+ 			pn = tx_sa->next_pn;
+ 			pn_len = MACSEC_XPN_PN_LEN;
+@@ -2996,20 +3111,6 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 			goto nla_put_failure;
+ 		}
+ 
+-		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
+-		if (!attr) {
+-			nla_nest_cancel(skb, txsa_nest);
+-			nla_nest_cancel(skb, txsa_list);
+-			goto nla_put_failure;
+-		}
+-		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
+-			nla_nest_cancel(skb, attr);
+-			nla_nest_cancel(skb, txsa_nest);
+-			nla_nest_cancel(skb, txsa_list);
+-			goto nla_put_failure;
+-		}
+-		nla_nest_end(skb, attr);
+-
+ 		nla_nest_end(skb, txsa_nest);
+ 	}
+ 	nla_nest_end(skb, txsa_list);
+@@ -3043,7 +3144,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 			nla_nest_cancel(skb, rxsc_list);
+ 			goto nla_put_failure;
+ 		}
+-		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
++		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
++		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
++		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
+ 			nla_nest_cancel(skb, attr);
+ 			nla_nest_cancel(skb, rxsc_nest);
+ 			nla_nest_cancel(skb, rxsc_list);
+@@ -3084,7 +3187,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ 				nla_nest_cancel(skb, rxsc_list);
+ 				goto nla_put_failure;
+ 			}
+-			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
++			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
++			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
++			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
+ 				nla_nest_cancel(skb, attr);
+ 				nla_nest_cancel(skb, rxsa_list);
+ 				nla_nest_cancel(skb, rxsc_nest);
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 71de2c863df70..52874cdfe2260 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -88,6 +88,17 @@ struct macsec_tx_sc_stats {
+ 	__u64 OutOctetsEncrypted;
+ };
+ 
++struct macsec_dev_stats {
++	__u64 OutPktsUntagged;
++	__u64 InPktsUntagged;
++	__u64 OutPktsTooLong;
++	__u64 InPktsNoTag;
++	__u64 InPktsBadTag;
++	__u64 InPktsUnknownSCI;
++	__u64 InPktsNoSCI;
++	__u64 InPktsOverrun;
++};
++
+ /**
+  * struct macsec_rx_sa - receive secure association
+  * @active:
+@@ -236,6 +247,13 @@ struct macsec_context {
+ 			struct macsec_tx_sa *tx_sa;
+ 		};
+ 	} sa;
++	union {
++		struct macsec_tx_sc_stats *tx_sc_stats;
++		struct macsec_tx_sa_stats *tx_sa_stats;
++		struct macsec_rx_sc_stats *rx_sc_stats;
++		struct macsec_rx_sa_stats *rx_sa_stats;
++		struct macsec_dev_stats  *dev_stats;
++	} stats;
+ 
+ 	u8 prepare:1;
+ };
+@@ -262,6 +280,12 @@ struct macsec_ops {
+ 	int (*mdo_add_txsa)(struct macsec_context *ctx);
+ 	int (*mdo_upd_txsa)(struct macsec_context *ctx);
+ 	int (*mdo_del_txsa)(struct macsec_context *ctx);
++	/* Statistics */
++	int (*mdo_get_dev_stats)(struct macsec_context *ctx);
++	int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx);
++	int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
++	int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
++	int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
+ };
+ 
+ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1773-09-v5.18-net-macsec-report-real_dev-features-when-HW-offloading-is-enabled.patch b/target/linux/mediatek/patches-5.4/999-1773-09-v5.18-net-macsec-report-real_dev-features-when-HW-offloading-is-enabled.patch
new file mode 100644
index 0000000..b91bb9b
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1773-09-v5.18-net-macsec-report-real_dev-features-when-HW-offloading-is-enabled.patch
@@ -0,0 +1,94 @@
+From c850240b6c4132574a00f2da439277ab94265b66 Mon Sep 17 00:00:00 2001
+From: Mark Starovoytov <mstarovoitov@marvell.com>
+Date: Wed, 25 Mar 2020 15:52:38 +0300
+Subject: net: macsec: report real_dev features when HW offloading is enabled
+
+This patch makes real_dev_feature propagation by MACSec offloaded device.
+
+Issue description:
+real_dev features are disabled upon macsec creation.
+
+Root cause:
+Features limitation (specific to SW MACSec limitation) is being applied
+to HW offloaded case as well.
+This causes 'set_features' request on the real_dev with reduced feature
+set due to chain propagation.
+
+Proposed solution:
+Report real_dev features when HW offloading is enabled.
+NB! MACSec offloaded device does not propagate VLAN offload features at
+the moment. This can potentially be added later on as a separate patch.
+
+Note: this patch requires HW offloading to be enabled by default in order
+to function properly.
+
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 26 ++++++++++++++++++++++----
+ 1 file changed, 22 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index b00a078d13ffe..2dad91cba459c 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2633,6 +2633,10 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ 		goto rollback;
+ 
+ 	rtnl_unlock();
++	/* Force features update, since they are different for SW MACSec and
++	 * HW offloading cases.
++	 */
++	netdev_update_features(dev);
+ 	return 0;
+ 
+ rollback:
+@@ -3399,9 +3403,16 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
+ 	return ret;
+ }
+ 
+-#define MACSEC_FEATURES \
++#define SW_MACSEC_FEATURES \
+ 	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
+ 
++/* If h/w offloading is enabled, use real device features save for
++ *   VLAN_FEATURES - they require additional ops
++ *   HW_MACSEC - no reason to report it
++ */
++#define REAL_DEV_FEATURES(dev) \
++	((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC))
++
+ static int macsec_dev_init(struct net_device *dev)
+ {
+ 	struct macsec_dev *macsec = macsec_priv(dev);
+@@ -3418,8 +3429,12 @@ static int macsec_dev_init(struct net_device *dev)
+ 		return err;
+ 	}
+ 
+-	dev->features = real_dev->features & MACSEC_FEATURES;
+-	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
++	if (macsec_is_offloaded(macsec)) {
++		dev->features = REAL_DEV_FEATURES(real_dev);
++	} else {
++		dev->features = real_dev->features & SW_MACSEC_FEATURES;
++		dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
++	}
+ 
+ 	dev->needed_headroom = real_dev->needed_headroom +
+ 			       MACSEC_NEEDED_HEADROOM;
+@@ -3448,7 +3463,10 @@ static netdev_features_t macsec_fix_features(struct net_device *dev,
+ 	struct macsec_dev *macsec = macsec_priv(dev);
+ 	struct net_device *real_dev = macsec->real_dev;
+ 
+-	features &= (real_dev->features & MACSEC_FEATURES) |
++	if (macsec_is_offloaded(macsec))
++		return REAL_DEV_FEATURES(real_dev);
++
++	features &= (real_dev->features & SW_MACSEC_FEATURES) |
+ 		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
+ 	features |= NETIF_F_LLTX;
+ 
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1774-v5.18-net-macsec-add-support-for-specifying-offload-upon-link-creation.patch b/target/linux/mediatek/patches-5.4/999-1774-v5.18-net-macsec-add-support-for-specifying-offload-upon-link-creation.patch
new file mode 100644
index 0000000..8cac11c
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1774-v5.18-net-macsec-add-support-for-specifying-offload-upon-link-creation.patch
@@ -0,0 +1,102 @@
+From 791bb3fcafcedd11f9066da9fee9342ecb6904d0 Mon Sep 17 00:00:00 2001
+From: Mark Starovoytov <mstarovoitov@marvell.com>
+Date: Wed, 25 Mar 2020 16:01:34 +0300
+Subject: net: macsec: add support for specifying offload upon link creation
+
+This patch adds new netlink attribute to allow a user to (optionally)
+specify the desired offload mode immediately upon MACSec link creation.
+
+Separate iproute patch will be required to support this from user space.
+
+Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c               | 31 +++++++++++++++++++++++++++++--
+ include/uapi/linux/if_link.h       |  1 +
+ tools/include/uapi/linux/if_link.h |  1 +
+ 3 files changed, 31 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 2dad91cba459c..da82d7f16a09d 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1469,6 +1469,11 @@ static struct net_device *get_dev_from_nl(struct net *net,
+ 	return dev;
+ }
+ 
++static enum macsec_offload nla_get_offload(const struct nlattr *nla)
++{
++	return (__force enum macsec_offload)nla_get_u8(nla);
++}
++
+ static sci_t nla_get_sci(const struct nlattr *nla)
+ {
+ 	return (__force sci_t)nla_get_u64(nla);
+@@ -4012,8 +4017,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 
+ 	macsec->real_dev = real_dev;
+ 
+-	/* MACsec offloading is off by default */
+-	macsec->offload = MACSEC_OFFLOAD_OFF;
++	if (data && data[IFLA_MACSEC_OFFLOAD])
++		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
++	else
++		/* MACsec offloading is off by default */
++		macsec->offload = MACSEC_OFFLOAD_OFF;
++
++	/* Check if the offloading mode is supported by the underlying layers */
++	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
++	    !macsec_check_offload(macsec->offload, macsec))
++		return -EOPNOTSUPP;
+ 
+ 	if (data && data[IFLA_MACSEC_ICV_LEN])
+ 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
+@@ -4056,6 +4069,20 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 			goto del_dev;
+ 	}
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(macsec, &ctx);
++		if (ops) {
++			ctx.secy = &macsec->secy;
++			err = macsec_offload(ops->mdo_add_secy, &ctx);
++			if (err)
++				goto del_dev;
++		}
++	}
++
+ 	err = register_macsec_dev(real_dev, dev);
+ 	if (err < 0)
+ 		goto del_dev;
+diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
+index d6ccd0105c050..e204c3e4dce10 100644
+--- a/include/uapi/linux/if_link.h
++++ b/include/uapi/linux/if_link.h
+@@ -463,6 +463,7 @@ enum {
+ 	IFLA_MACSEC_REPLAY_PROTECT,
+ 	IFLA_MACSEC_VALIDATION,
+ 	IFLA_MACSEC_PAD,
++	IFLA_MACSEC_OFFLOAD,
+ 	__IFLA_MACSEC_MAX,
+ };
+ 
+diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
+index 771371d5b9960..24cf6fe075f74 100644
+--- a/tools/include/uapi/linux/if_link.h
++++ b/tools/include/uapi/linux/if_link.h
+@@ -463,6 +463,7 @@ enum {
+ 	IFLA_MACSEC_REPLAY_PROTECT,
+ 	IFLA_MACSEC_VALIDATION,
+ 	IFLA_MACSEC_PAD,
++	IFLA_MACSEC_OFFLOAD,
+ 	__IFLA_MACSEC_MAX,
+ };
+ 
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1775-v5.18-net-macsec-fix-NULL-dereference-in-macsec_upd_offload.patch b/target/linux/mediatek/patches-5.4/999-1775-v5.18-net-macsec-fix-NULL-dereference-in-macsec_upd_offload.patch
new file mode 100644
index 0000000..b885c0f
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1775-v5.18-net-macsec-fix-NULL-dereference-in-macsec_upd_offload.patch
@@ -0,0 +1,35 @@
+From aa81700cf2326e288c9ca1fe7b544039617f1fc2 Mon Sep 17 00:00:00 2001
+From: Davide Caratti <dcaratti@redhat.com>
+Date: Mon, 6 Apr 2020 11:38:29 +0200
+Subject: macsec: fix NULL dereference in macsec_upd_offload()
+
+macsec_upd_offload() gets the value of MACSEC_OFFLOAD_ATTR_TYPE
+without checking its presence in the request message, and this causes
+a NULL dereference. Fix it rejecting any configuration that does not
+include this attribute.
+
+Reported-and-tested-by: syzbot+7022ab7c383875c17eff@syzkaller.appspotmail.com
+Fixes: dcb780fb2795 ("net: macsec: add nla support for changing the offloading selection")
+Signed-off-by: Davide Caratti <dcaratti@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index da82d7f16a09d..0d580d81d910f 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2594,6 +2594,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ 		return PTR_ERR(dev);
+ 	macsec = macsec_priv(dev);
+ 
++	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
++		return -EINVAL;
++
+ 	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
+ 	if (macsec->offload == offload)
+ 		return 0;
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1776-v5.18-net-macsec-fix-using-wrong-structure-in-macsec_changelink.patch b/target/linux/mediatek/patches-5.4/999-1776-v5.18-net-macsec-fix-using-wrong-structure-in-macsec_changelink.patch
new file mode 100644
index 0000000..8d10c56
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1776-v5.18-net-macsec-fix-using-wrong-structure-in-macsec_changelink.patch
@@ -0,0 +1,73 @@
+From 022e9d6090599c0593c78e87dc9ba98a290e6bc4 Mon Sep 17 00:00:00 2001
+From: Taehee Yoo <ap420073@gmail.com>
+Date: Thu, 9 Apr 2020 14:08:08 +0000
+Subject: net: macsec: fix using wrong structure in macsec_changelink()
+
+In the macsec_changelink(), "struct macsec_tx_sa tx_sc" is used to
+store "macsec_secy.tx_sc".
+But, the struct type of tx_sc is macsec_tx_sc, not macsec_tx_sa.
+So, the macsec_tx_sc should be used instead.
+
+Test commands:
+    ip link add dummy0 type dummy
+    ip link add macsec0 link dummy0 type macsec
+    ip link set macsec0 type macsec encrypt off
+
+Splat looks like:
+[61119.963483][ T9335] ==================================================================
+[61119.964709][ T9335] BUG: KASAN: slab-out-of-bounds in macsec_changelink.part.34+0xb6/0x200 [macsec]
+[61119.965787][ T9335] Read of size 160 at addr ffff888020d69c68 by task ip/9335
+[61119.966699][ T9335]
+[61119.966979][ T9335] CPU: 0 PID: 9335 Comm: ip Not tainted 5.6.0+ #503
+[61119.967791][ T9335] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
+[61119.968914][ T9335] Call Trace:
+[61119.969324][ T9335]  dump_stack+0x96/0xdb
+[61119.969809][ T9335]  ? macsec_changelink.part.34+0xb6/0x200 [macsec]
+[61119.970554][ T9335]  print_address_description.constprop.5+0x1be/0x360
+[61119.971294][ T9335]  ? macsec_changelink.part.34+0xb6/0x200 [macsec]
+[61119.971973][ T9335]  ? macsec_changelink.part.34+0xb6/0x200 [macsec]
+[61119.972703][ T9335]  __kasan_report+0x12a/0x170
+[61119.973323][ T9335]  ? macsec_changelink.part.34+0xb6/0x200 [macsec]
+[61119.973942][ T9335]  kasan_report+0xe/0x20
+[61119.974397][ T9335]  check_memory_region+0x149/0x1a0
+[61119.974866][ T9335]  memcpy+0x1f/0x50
+[61119.975209][ T9335]  macsec_changelink.part.34+0xb6/0x200 [macsec]
+[61119.975825][ T9335]  ? macsec_get_stats64+0x3e0/0x3e0 [macsec]
+[61119.976451][ T9335]  ? kernel_text_address+0x111/0x120
+[61119.976990][ T9335]  ? pskb_expand_head+0x25f/0xe10
+[61119.977503][ T9335]  ? stack_trace_save+0x82/0xb0
+[61119.977986][ T9335]  ? memset+0x1f/0x40
+[61119.978397][ T9335]  ? __nla_validate_parse+0x98/0x1ab0
+[61119.978936][ T9335]  ? macsec_alloc_tfm+0x90/0x90 [macsec]
+[61119.979511][ T9335]  ? __kasan_slab_free+0x111/0x150
+[61119.980021][ T9335]  ? kfree+0xce/0x2f0
+[61119.980700][ T9335]  ? netlink_trim+0x196/0x1f0
+[61119.981420][ T9335]  ? nla_memcpy+0x90/0x90
+[61119.982036][ T9335]  ? register_lock_class+0x19e0/0x19e0
+[61119.982776][ T9335]  ? memcpy+0x34/0x50
+[61119.983327][ T9335]  __rtnl_newlink+0x922/0x1270
+[ ... ]
+
+Fixes: 3cf3227a21d1 ("net: macsec: hardware offloading infrastructure")
+Signed-off-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 0d580d81d910f..a183250ff66ad 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3809,7 +3809,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
+ 			     struct netlink_ext_ack *extack)
+ {
+ 	struct macsec_dev *macsec = macsec_priv(dev);
+-	struct macsec_tx_sa tx_sc;
++	struct macsec_tx_sc tx_sc;
+ 	struct macsec_secy secy;
+ 	int ret;
+ 
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1777-v5.18-net-partially-revert-dynamic-lockdep-key-changes.patch b/target/linux/mediatek/patches-5.4/999-1777-v5.18-net-partially-revert-dynamic-lockdep-key-changes.patch
new file mode 100644
index 0000000..df1d001
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1777-v5.18-net-partially-revert-dynamic-lockdep-key-changes.patch
@@ -0,0 +1,753 @@
+From 1a33e10e4a95cb109ff1145098175df3113313ef Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sat, 2 May 2020 22:22:19 -0700
+Subject: net: partially revert dynamic lockdep key changes
+
+This patch reverts the following commits:
+
+commit 064ff66e2bef84f1153087612032b5b9eab005bd
+"bonding: add missing netdev_update_lockdep_key()"
+
+commit 53d374979ef147ab51f5d632dfe20b14aebeccd0
+"net: avoid updating qdisc_xmit_lock_key in netdev_update_lockdep_key()"
+
+commit 1f26c0d3d24125992ab0026b0dab16c08df947c7
+"net: fix kernel-doc warning in <linux/netdevice.h>"
+
+commit ab92d68fc22f9afab480153bd82a20f6e2533769
+"net: core: add generic lockdep keys"
+
+but keeps the addr_list_lock_key because we still lock
+addr_list_lock nestedly on stack devices, unlikely xmit_lock
+this is safe because we don't take addr_list_lock on any fast
+path.
+
+Reported-and-tested-by: syzbot+aaa6fa4949cc5d9b7b25@syzkaller.appspotmail.com
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Taehee Yoo <ap420073@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/bonding/bond_main.c                   |  1 +
+ drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 16 ++++
+ drivers/net/hamradio/bpqether.c                   | 20 +++++
+ drivers/net/hyperv/netvsc_drv.c                   |  2 +
+ drivers/net/ipvlan/ipvlan_main.c                  |  2 +
+ drivers/net/macsec.c                              |  2 +
+ drivers/net/macvlan.c                             |  2 +
+ drivers/net/ppp/ppp_generic.c                     |  2 +
+ drivers/net/team/team.c                           |  1 +
+ drivers/net/vrf.c                                 |  1 +
+ drivers/net/wireless/intersil/hostap/hostap_hw.c  | 22 ++++++
+ include/linux/netdevice.h                         | 27 +++++--
+ net/8021q/vlan_dev.c                              | 21 ++++++
+ net/batman-adv/soft-interface.c                   | 30 ++++++++
+ net/bluetooth/6lowpan.c                           |  8 ++
+ net/core/dev.c                                    | 90 ++++++++++++++++++-----
+ net/dsa/slave.c                                   | 12 +++
+ net/ieee802154/6lowpan/core.c                     |  8 ++
+ net/l2tp/l2tp_eth.c                               |  1 +
+ net/netrom/af_netrom.c                            | 21 ++++++
+ net/rose/af_rose.c                                | 21 ++++++
+ net/sched/sch_generic.c                           | 17 +++--
+ 22 files changed, 294 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 2e70e43c5df5c..d01871321d220 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4898,6 +4898,7 @@ static int bond_init(struct net_device *bond_dev)
+ 	spin_lock_init(&bond->stats_lock);
+ 	lockdep_register_key(&bond->stats_lock_key);
+ 	lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
++	netdev_lockdep_set_classes(bond_dev);
+ 
+ 	list_add_tail(&bond->bond_list, &bn->dev_list);
+ 
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+index 79d72c88bbef2..b3cabc274121b 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+@@ -299,6 +299,20 @@ static void nfp_repr_clean(struct nfp_repr *repr)
+ 	nfp_port_free(repr->port);
+ }
+ 
++static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
++
++static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
++					   struct netdev_queue *txq,
++					   void *_unused)
++{
++	lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
++}
++
++static void nfp_repr_set_lockdep_class(struct net_device *dev)
++{
++	netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
++}
++
+ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
+ 		  u32 cmsg_port_id, struct nfp_port *port,
+ 		  struct net_device *pf_netdev)
+@@ -308,6 +322,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
+ 	u32 repr_cap = nn->tlv_caps.repr_cap;
+ 	int err;
+ 
++	nfp_repr_set_lockdep_class(netdev);
++
+ 	repr->port = port;
+ 	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
+ 	if (!repr->dst)
+diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
+index fbea6f232819e..206688154fdfa 100644
+--- a/drivers/net/hamradio/bpqether.c
++++ b/drivers/net/hamradio/bpqether.c
+@@ -107,6 +107,25 @@ struct bpqdev {
+ 
+ static LIST_HEAD(bpq_devices);
+ 
++/*
++ * bpqether network devices are paired with ethernet devices below them, so
++ * form a special "super class" of normal ethernet devices; split their locks
++ * off into a separate class since they always nest.
++ */
++static struct lock_class_key bpq_netdev_xmit_lock_key;
++
++static void bpq_set_lockdep_class_one(struct net_device *dev,
++				      struct netdev_queue *txq,
++				      void *_unused)
++{
++	lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
++}
++
++static void bpq_set_lockdep_class(struct net_device *dev)
++{
++	netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
++}
++
+ /* ------------------------------------------------------------------------ */
+ 
+ 
+@@ -477,6 +496,7 @@ static int bpq_new_device(struct net_device *edev)
+ 	err = register_netdevice(ndev);
+ 	if (err)
+ 		goto error;
++	bpq_set_lockdep_class(ndev);
+ 
+ 	/* List protected by RTNL */
+ 	list_add_rcu(&bpq->bpq_list, &bpq_devices);
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index d8e86bdbfba1e..c0b647a4c8934 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2456,6 +2456,8 @@ static int netvsc_probe(struct hv_device *dev,
+ 		NETIF_F_HW_VLAN_CTAG_RX;
+ 	net->vlan_features = net->features;
+ 
++	netdev_lockdep_set_classes(net);
++
+ 	/* MTU range: 68 - 1500 or 65521 */
+ 	net->min_mtu = NETVSC_MTU_MIN;
+ 	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index f195f278a83aa..15e87c097b0b3 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -131,6 +131,8 @@ static int ipvlan_init(struct net_device *dev)
+ 	dev->gso_max_segs = phy_dev->gso_max_segs;
+ 	dev->hard_header_len = phy_dev->hard_header_len;
+ 
++	netdev_lockdep_set_classes(dev);
++
+ 	ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
+ 	if (!ipvlan->pcpu_stats)
+ 		return -ENOMEM;
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 758baf7cb8a16..ea3f25cc79efa 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -4047,6 +4047,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 	if (err < 0)
+ 		return err;
+ 
++	netdev_lockdep_set_classes(dev);
++
+ 	err = netdev_upper_dev_link(real_dev, dev, extack);
+ 	if (err < 0)
+ 		goto unregister;
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index d45600e0a38cd..34eb073cdd744 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -890,6 +890,8 @@ static int macvlan_init(struct net_device *dev)
+ 	dev->gso_max_segs	= lowerdev->gso_max_segs;
+ 	dev->hard_header_len	= lowerdev->hard_header_len;
+ 
++	netdev_lockdep_set_classes(dev);
++
+ 	vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+ 	if (!vlan->pcpu_stats)
+ 		return -ENOMEM;
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 22cc2cb9d878f..7d005896a0f93 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -1410,6 +1410,8 @@ static int ppp_dev_init(struct net_device *dev)
+ {
+ 	struct ppp *ppp;
+ 
++	netdev_lockdep_set_classes(dev);
++
+ 	ppp = netdev_priv(dev);
+ 	/* Let the netdevice take a reference on the ppp file. This ensures
+ 	 * that ppp_destroy_interface() won't run before the device gets
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 04845a4017f93..8c1e02752ff61 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1647,6 +1647,7 @@ static int team_init(struct net_device *dev)
+ 
+ 	lockdep_register_key(&team->team_lock_key);
+ 	__mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
++	netdev_lockdep_set_classes(dev);
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
+index 56f8aab46f89b..43928a1c2f2a4 100644
+--- a/drivers/net/vrf.c
++++ b/drivers/net/vrf.c
+@@ -867,6 +867,7 @@ static int vrf_dev_init(struct net_device *dev)
+ 
+ 	/* similarly, oper state is irrelevant; set to up to avoid confusion */
+ 	dev->operstate = IF_OPER_UP;
++	netdev_lockdep_set_classes(dev);
+ 	return 0;
+ 
+ out_rth:
+diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
+index 58212c532c900..aadf3dec5bf32 100644
+--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
++++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
+@@ -3041,6 +3041,27 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
+ 	}
+ }
+ 
++
++/*
++ * HostAP uses two layers of net devices, where the inner
++ * layer gets called all the time from the outer layer.
++ * This is a natural nesting, which needs a split lock type.
++ */
++static struct lock_class_key hostap_netdev_xmit_lock_key;
++
++static void prism2_set_lockdep_class_one(struct net_device *dev,
++					 struct netdev_queue *txq,
++					 void *_unused)
++{
++	lockdep_set_class(&txq->_xmit_lock,
++			  &hostap_netdev_xmit_lock_key);
++}
++
++static void prism2_set_lockdep_class(struct net_device *dev)
++{
++	netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
++}
++
+ static struct net_device *
+ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
+ 		       struct device *sdev)
+@@ -3199,6 +3220,7 @@ while (0)
+ 	if (ret >= 0)
+ 		ret = register_netdevice(dev);
+ 
++	prism2_set_lockdep_class(dev);
+ 	rtnl_unlock();
+ 	if (ret < 0) {
+ 		printk(KERN_WARNING "%s: register netdevice failed!\n",
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 5a8d40f1ffe2a..7725efd6e48a1 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1805,13 +1805,11 @@ enum netdev_priv_flags {
+  *	@phydev:	Physical device may attach itself
+  *			for hardware timestamping
+  *	@sfp_bus:	attached &struct sfp_bus structure.
+- *	@qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
+- *				spinlock
+- *	@qdisc_running_key:	lockdep class annotating Qdisc->running seqcount
+- *	@qdisc_xmit_lock_key:	lockdep class annotating
+- *				netdev_queue->_xmit_lock spinlock
++ *
+  *	@addr_list_lock_key:	lockdep class annotating
+  *				net_device->addr_list_lock spinlock
++ *	@qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
++ *	@qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+  *
+  *	@proto_down:	protocol port state information can be sent to the
+  *			switch driver and used to set the phys state of the
+@@ -2112,10 +2110,9 @@ struct net_device {
+ #endif
+ 	struct phy_device	*phydev;
+ 	struct sfp_bus		*sfp_bus;
+-	struct lock_class_key	qdisc_tx_busylock_key;
+-	struct lock_class_key	qdisc_running_key;
+-	struct lock_class_key	qdisc_xmit_lock_key;
+ 	struct lock_class_key	addr_list_lock_key;
++	struct lock_class_key	*qdisc_tx_busylock;
++	struct lock_class_key	*qdisc_running_key;
+ 	bool			proto_down;
+ 	unsigned		wol_enabled:1;
+ 
+@@ -2200,6 +2197,20 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
+ 		f(dev, &dev->_tx[i], arg);
+ }
+ 
++#define netdev_lockdep_set_classes(dev)				\
++{								\
++	static struct lock_class_key qdisc_tx_busylock_key;	\
++	static struct lock_class_key qdisc_running_key;		\
++	static struct lock_class_key qdisc_xmit_lock_key;	\
++	unsigned int i;						\
++								\
++	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
++	(dev)->qdisc_running_key = &qdisc_running_key;		\
++	for (i = 0; i < (dev)->num_tx_queues; i++)		\
++		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
++				  &qdisc_xmit_lock_key);	\
++}
++
+ u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ 		     struct net_device *sb_dev);
+ struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 990b9fde28c65..319220b2341dd 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -489,6 +489,25 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
+ 	dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+ }
+ 
++/*
++ * vlan network devices have devices nesting below it, and are a special
++ * "super class" of normal network devices; split their locks off into a
++ * separate class since they always nest.
++ */
++static struct lock_class_key vlan_netdev_xmit_lock_key;
++
++static void vlan_dev_set_lockdep_one(struct net_device *dev,
++				     struct netdev_queue *txq,
++				     void *unused)
++{
++	lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
++}
++
++static void vlan_dev_set_lockdep_class(struct net_device *dev)
++{
++	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
++}
++
+ static const struct header_ops vlan_header_ops = {
+ 	.create	 = vlan_dev_hard_header,
+ 	.parse	 = eth_header_parse,
+@@ -579,6 +598,8 @@ static int vlan_dev_init(struct net_device *dev)
+ 
+ 	SET_NETDEV_DEVTYPE(dev, &vlan_type);
+ 
++	vlan_dev_set_lockdep_class(dev);
++
+ 	vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+ 	if (!vlan->vlan_pcpu_stats)
+ 		return -ENOMEM;
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 5f05a728f347a..822af540b8540 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -739,6 +739,34 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
+ 	return 0;
+ }
+ 
++/* batman-adv network devices have devices nesting below it and are a special
++ * "super class" of normal network devices; split their locks off into a
++ * separate class since they always nest.
++ */
++static struct lock_class_key batadv_netdev_xmit_lock_key;
++
++/**
++ * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
++ * @dev: device which owns the tx queue
++ * @txq: tx queue to modify
++ * @_unused: always NULL
++ */
++static void batadv_set_lockdep_class_one(struct net_device *dev,
++					 struct netdev_queue *txq,
++					 void *_unused)
++{
++	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
++}
++
++/**
++ * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
++ * @dev: network device to modify
++ */
++static void batadv_set_lockdep_class(struct net_device *dev)
++{
++	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
++}
++
+ /**
+  * batadv_softif_init_late() - late stage initialization of soft interface
+  * @dev: registered network device to modify
+@@ -752,6 +780,8 @@ static int batadv_softif_init_late(struct net_device *dev)
+ 	int ret;
+ 	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
+ 
++	batadv_set_lockdep_class(dev);
++
+ 	bat_priv = netdev_priv(dev);
+ 	bat_priv->soft_iface = dev;
+ 
+diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
+index 4febc82a7c761..bb55d92691b06 100644
+--- a/net/bluetooth/6lowpan.c
++++ b/net/bluetooth/6lowpan.c
+@@ -571,7 +571,15 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
+ 	return err < 0 ? NET_XMIT_DROP : err;
+ }
+ 
++static int bt_dev_init(struct net_device *dev)
++{
++	netdev_lockdep_set_classes(dev);
++
++	return 0;
++}
++
+ static const struct net_device_ops netdev_ops = {
++	.ndo_init		= bt_dev_init,
+ 	.ndo_start_xmit		= bt_xmit,
+ };
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index afff16849c261..f8d83922a6afb 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -398,6 +398,74 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
+ DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+ EXPORT_PER_CPU_SYMBOL(softnet_data);
+ 
++#ifdef CONFIG_LOCKDEP
++/*
++ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
++ * according to dev->type
++ */
++static const unsigned short netdev_lock_type[] = {
++	 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
++	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
++	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
++	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
++	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
++	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
++	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
++	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
++	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
++	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
++	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
++	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
++	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
++	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
++	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
++
++static const char *const netdev_lock_name[] = {
++	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
++	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
++	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
++	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
++	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
++	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
++	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
++	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
++	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
++	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
++	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
++	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
++	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
++	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
++	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
++
++static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
++
++static inline unsigned short netdev_lock_pos(unsigned short dev_type)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
++		if (netdev_lock_type[i] == dev_type)
++			return i;
++	/* the last key is used by default */
++	return ARRAY_SIZE(netdev_lock_type) - 1;
++}
++
++static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
++						 unsigned short dev_type)
++{
++	int i;
++
++	i = netdev_lock_pos(dev_type);
++	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
++				   netdev_lock_name[i]);
++}
++#else
++static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
++						 unsigned short dev_type)
++{
++}
++#endif
++
+ /*******************************************************************************
+  *
+  *		Protocol management and registration routines
+@@ -9208,7 +9276,7 @@ static void netdev_init_one_queue(struct net_device *dev,
+ {
+ 	/* Initialize queue lock */
+ 	spin_lock_init(&queue->_xmit_lock);
+-	lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
++	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+ 	queue->xmit_lock_owner = -1;
+ 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
+ 	queue->dev = dev;
+@@ -9255,22 +9323,6 @@ void netif_tx_stop_all_queues(struct net_device *dev)
+ }
+ EXPORT_SYMBOL(netif_tx_stop_all_queues);
+ 
+-static void netdev_register_lockdep_key(struct net_device *dev)
+-{
+-	lockdep_register_key(&dev->qdisc_tx_busylock_key);
+-	lockdep_register_key(&dev->qdisc_running_key);
+-	lockdep_register_key(&dev->qdisc_xmit_lock_key);
+-	lockdep_register_key(&dev->addr_list_lock_key);
+-}
+-
+-static void netdev_unregister_lockdep_key(struct net_device *dev)
+-{
+-	lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
+-	lockdep_unregister_key(&dev->qdisc_running_key);
+-	lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+-	lockdep_unregister_key(&dev->addr_list_lock_key);
+-}
+-
+ void netdev_update_lockdep_key(struct net_device *dev)
+ {
+ 	lockdep_unregister_key(&dev->addr_list_lock_key);
+@@ -9837,7 +9889,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ 
+ 	dev_net_set(dev, &init_net);
+ 
+-	netdev_register_lockdep_key(dev);
++	lockdep_register_key(&dev->addr_list_lock_key);
+ 
+ 	dev->gso_max_size = GSO_MAX_SIZE;
+ 	dev->gso_max_segs = GSO_MAX_SEGS;
+@@ -9926,7 +9978,7 @@ void free_netdev(struct net_device *dev)
+ 	free_percpu(dev->xdp_bulkq);
+ 	dev->xdp_bulkq = NULL;
+ 
+-	netdev_unregister_lockdep_key(dev);
++	lockdep_unregister_key(&dev->addr_list_lock_key);
+ 
+ 	/*  Compatibility with error handling in drivers */
+ 	if (dev->reg_state == NETREG_UNINITIALIZED) {
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index ba8bf90dc0cc1..fa26340437519 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1671,6 +1671,15 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
+ 	return ret;
+ }
+ 
++static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
++static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
++					    struct netdev_queue *txq,
++					    void *_unused)
++{
++	lockdep_set_class(&txq->_xmit_lock,
++			  &dsa_slave_netdev_xmit_lock_key);
++}
++
+ int dsa_slave_suspend(struct net_device *slave_dev)
+ {
+ 	struct dsa_port *dp = dsa_slave_to_port(slave_dev);
+@@ -1754,6 +1763,9 @@ int dsa_slave_create(struct dsa_port *port)
+ 		slave_dev->max_mtu = ETH_MAX_MTU;
+ 	SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
+ 
++	netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
++				 NULL);
++
+ 	SET_NETDEV_DEV(slave_dev, port->ds->dev);
+ 	slave_dev->dev.of_node = port->dn;
+ 	slave_dev->vlan_features = master->vlan_features;
+diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
+index c0b107cdd7153..3297e7fa99458 100644
+--- a/net/ieee802154/6lowpan/core.c
++++ b/net/ieee802154/6lowpan/core.c
+@@ -58,6 +58,13 @@ static const struct header_ops lowpan_header_ops = {
+ 	.create	= lowpan_header_create,
+ };
+ 
++static int lowpan_dev_init(struct net_device *ldev)
++{
++	netdev_lockdep_set_classes(ldev);
++
++	return 0;
++}
++
+ static int lowpan_open(struct net_device *dev)
+ {
+ 	if (!open_count)
+@@ -89,6 +96,7 @@ static int lowpan_get_iflink(const struct net_device *dev)
+ }
+ 
+ static const struct net_device_ops lowpan_netdev_ops = {
++	.ndo_init		= lowpan_dev_init,
+ 	.ndo_start_xmit		= lowpan_xmit,
+ 	.ndo_open		= lowpan_open,
+ 	.ndo_stop		= lowpan_stop,
+diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
+index d3b520b9b2c9d..fd5ac2788e45c 100644
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -56,6 +56,7 @@ static int l2tp_eth_dev_init(struct net_device *dev)
+ {
+ 	eth_hw_addr_random(dev);
+ 	eth_broadcast_addr(dev->broadcast);
++	netdev_lockdep_set_classes(dev);
+ 
+ 	return 0;
+ }
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index 7b1a74f74aad5..eccc7d366e17f 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -63,6 +63,26 @@ static DEFINE_SPINLOCK(nr_list_lock);
+ 
+ static const struct proto_ops nr_proto_ops;
+ 
++/*
++ * NETROM network devices are virtual network devices encapsulating NETROM
++ * frames into AX.25 which will be sent through an AX.25 device, so form a
++ * special "super class" of normal net devices; split their locks off into a
++ * separate class since they always nest.
++ */
++static struct lock_class_key nr_netdev_xmit_lock_key;
++
++static void nr_set_lockdep_one(struct net_device *dev,
++			       struct netdev_queue *txq,
++			       void *_unused)
++{
++	lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
++}
++
++static void nr_set_lockdep_key(struct net_device *dev)
++{
++	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
++}
++
+ /*
+  *	Socket removal during an interrupt is now safe.
+  */
+@@ -1394,6 +1414,7 @@ static int __init nr_proto_init(void)
+ 			free_netdev(dev);
+ 			goto fail;
+ 		}
++		nr_set_lockdep_key(dev);
+ 		dev_nr[i] = dev;
+ 	}
+ 
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index 1e8eeb044b07d..e7a872207b464 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -64,6 +64,26 @@ static const struct proto_ops rose_proto_ops;
+ 
+ ax25_address rose_callsign;
+ 
++/*
++ * ROSE network devices are virtual network devices encapsulating ROSE
++ * frames into AX.25 which will be sent through an AX.25 device, so form a
++ * special "super class" of normal net devices; split their locks off into a
++ * separate class since they always nest.
++ */
++static struct lock_class_key rose_netdev_xmit_lock_key;
++
++static void rose_set_lockdep_one(struct net_device *dev,
++				 struct netdev_queue *txq,
++				 void *_unused)
++{
++	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
++}
++
++static void rose_set_lockdep_key(struct net_device *dev)
++{
++	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
++}
++
+ /*
+  *	Convert a ROSE address into text.
+  */
+@@ -1511,6 +1531,7 @@ static int __init rose_proto_init(void)
+ 			free_netdev(dev);
+ 			goto fail;
+ 		}
++		rose_set_lockdep_key(dev);
+ 		dev_rose[i] = dev;
+ 	}
+ 
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index ad24fa1a51e63..ebc55d8842473 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -794,6 +794,9 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+ };
+ EXPORT_SYMBOL(pfifo_fast_ops);
+ 
++static struct lock_class_key qdisc_tx_busylock;
++static struct lock_class_key qdisc_running_key;
++
+ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 			  const struct Qdisc_ops *ops,
+ 			  struct netlink_ext_ack *extack)
+@@ -846,9 +849,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 	}
+ 
+ 	spin_lock_init(&sch->busylock);
++	lockdep_set_class(&sch->busylock,
++			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
++
+ 	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
+ 	spin_lock_init(&sch->seqlock);
++	lockdep_set_class(&sch->seqlock,
++			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
++
+ 	seqcount_init(&sch->running);
++	lockdep_set_class(&sch->running,
++			  dev->qdisc_running_key ?: &qdisc_running_key);
+ 
+ 	sch->ops = ops;
+ 	sch->flags = ops->static_flags;
+@@ -859,12 +870,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 	dev_hold(dev);
+ 	refcount_set(&sch->refcnt, 1);
+ 
+-	if (sch != &noop_qdisc) {
+-		lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
+-		lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
+-		lockdep_set_class(&sch->running, &dev->qdisc_running_key);
+-	}
+-
+ 	return sch;
+ errout1:
+ 	kfree(p);
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1778-v5.18-net-macsec-fix-rtnl-locking-issue.patch b/target/linux/mediatek/patches-5.4/999-1778-v5.18-net-macsec-fix-rtnl-locking-issue.patch
new file mode 100644
index 0000000..1ffaa86
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1778-v5.18-net-macsec-fix-rtnl-locking-issue.patch
@@ -0,0 +1,38 @@
+From 29ca3cdfe13b2792b8624e6f769777e8cb387f9c Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@bootlin.com>
+Date: Wed, 6 May 2020 15:58:30 +0200
+Subject: net: macsec: fix rtnl locking issue
+
+netdev_update_features() must be called with the rtnl lock taken. Not
+doing so triggers a warning, as ASSERT_RTNL() is used in
+__netdev_update_features(), the first function called by
+netdev_update_features(). Fix this.
+
+Fixes: c850240b6c41 ("net: macsec: report real_dev features when HW offloading is enabled")
+Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index d4034025c87c1..d0d31cb991803 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -2641,11 +2641,12 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+ 	if (ret)
+ 		goto rollback;
+ 
+-	rtnl_unlock();
+ 	/* Force features update, since they are different for SW MACSec and
+ 	 * HW offloading cases.
+ 	 */
+ 	netdev_update_features(dev);
++
++	rtnl_unlock();
+ 	return 0;
+ 
+ rollback:
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1779-v5.18-net-change-addr_list_lock-back-to-static-key.patch b/target/linux/mediatek/patches-5.4/999-1779-v5.18-net-change-addr_list_lock-back-to-static-key.patch
new file mode 100644
index 0000000..7ab7e00
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1779-v5.18-net-change-addr_list_lock-back-to-static-key.patch
@@ -0,0 +1,550 @@
+From 845e0ebb4408d4473cf60d21224a897037e9a77a Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Mon, 8 Jun 2020 14:53:01 -0700
+Subject: net: change addr_list_lock back to static key
+
+The dynamic key update for addr_list_lock still causes troubles,
+for example the following race condition still exists:
+
+CPU 0:				CPU 1:
+(RCU read lock)			(RTNL lock)
+dev_mc_seq_show()		netdev_update_lockdep_key()
+				  -> lockdep_unregister_key()
+ -> netif_addr_lock_bh()
+
+because lockdep doesn't provide an API to update it atomically.
+Therefore, we have to move it back to static keys and use subclass
+for nest locking like before.
+
+In commit 1a33e10e4a95 ("net: partially revert dynamic lockdep key
+changes"), I already reverted most parts of commit ab92d68fc22f
+("net: core: add generic lockdep keys").
+
+This patch reverts the rest and also part of commit f3b0a18bb6cb
+("net: remove unnecessary variables and callback"). After this
+patch, addr_list_lock changes back to using static keys and
+subclasses to satisfy lockdep. Thanks to dev->lower_level, we do
+not have to change back to ->ndo_get_lock_subclass().
+
+And hopefully this reduces some syzbot lockdep noises too.
+
+Reported-by: syzbot+f3a0e80c34b3fc28ac5e@syzkaller.appspotmail.com
+Cc: Taehee Yoo <ap420073@gmail.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/bonding/bond_main.c                  |  2 --
+ drivers/net/bonding/bond_options.c               |  2 --
+ drivers/net/hamradio/bpqether.c                  |  2 ++
+ drivers/net/macsec.c                             |  5 ++++
+ drivers/net/macvlan.c                            | 13 ++++++++--
+ drivers/net/vxlan.c                              |  4 +---
+ drivers/net/wireless/intersil/hostap/hostap_hw.c |  3 +++
+ include/linux/netdevice.h                        | 12 ++++++----
+ net/8021q/vlan_dev.c                             |  8 +++++--
+ net/batman-adv/soft-interface.c                  |  2 ++
+ net/bridge/br_device.c                           |  8 +++++++
+ net/core/dev.c                                   | 30 +++++++++++++-----------
+ net/core/dev_addr_lists.c                        | 12 +++++-----
+ net/core/rtnetlink.c                             |  1 -
+ net/dsa/master.c                                 |  4 ++++
+ net/netrom/af_netrom.c                           |  2 ++
+ net/rose/af_rose.c                               |  2 ++
+ 17 files changed, 76 insertions(+), 36 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index a25c65d4af716..004919aea5fbf 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3687,8 +3687,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
+ 	case BOND_RELEASE_OLD:
+ 	case SIOCBONDRELEASE:
+ 		res = bond_release(bond_dev, slave_dev);
+-		if (!res)
+-			netdev_update_lockdep_key(slave_dev);
+ 		break;
+ 	case BOND_SETHWADDR_OLD:
+ 	case SIOCBONDSETHWADDR:
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 215c109232893..ddb3916d3506b 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1398,8 +1398,6 @@ static int bond_option_slaves_set(struct bonding *bond,
+ 	case '-':
+ 		slave_dbg(bond->dev, dev, "Releasing interface\n");
+ 		ret = bond_release(bond->dev, dev);
+-		if (!ret)
+-			netdev_update_lockdep_key(dev);
+ 		break;
+ 
+ 	default:
+diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
+index 60dcaf2a04a91..1ad6085994b1c 100644
+--- a/drivers/net/hamradio/bpqether.c
++++ b/drivers/net/hamradio/bpqether.c
+@@ -113,6 +113,7 @@ static LIST_HEAD(bpq_devices);
+  * off into a separate class since they always nest.
+  */
+ static struct lock_class_key bpq_netdev_xmit_lock_key;
++static struct lock_class_key bpq_netdev_addr_lock_key;
+ 
+ static void bpq_set_lockdep_class_one(struct net_device *dev,
+ 				      struct netdev_queue *txq,
+@@ -123,6 +124,7 @@ static void bpq_set_lockdep_class_one(struct net_device *dev,
+ 
+ static void bpq_set_lockdep_class(struct net_device *dev)
+ {
++	lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
+ 	netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
+ }
+ 
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 20b53e255f68a..e56547bfdac9a 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3999,6 +3999,8 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
+ 	return 0;
+ }
+ 
++static struct lock_class_key macsec_netdev_addr_lock_key;
++
+ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 			  struct nlattr *tb[], struct nlattr *data[],
+ 			  struct netlink_ext_ack *extack)
+@@ -4050,6 +4052,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 		return err;
+ 
+ 	netdev_lockdep_set_classes(dev);
++	lockdep_set_class_and_subclass(&dev->addr_list_lock,
++				       &macsec_netdev_addr_lock_key,
++				       dev->lower_level);
+ 
+ 	err = netdev_upper_dev_link(real_dev, dev, extack);
+ 	if (err < 0)
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 563aed5b3d9fe..6a6cc9f753075 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -860,6 +860,8 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+  * "super class" of normal network devices; split their locks off into a
+  * separate class since they always nest.
+  */
++static struct lock_class_key macvlan_netdev_addr_lock_key;
++
+ #define ALWAYS_ON_OFFLOADS \
+ 	(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
+ 	 NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
+@@ -875,6 +877,14 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ #define MACVLAN_STATE_MASK \
+ 	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
+ 
++static void macvlan_set_lockdep_class(struct net_device *dev)
++{
++	netdev_lockdep_set_classes(dev);
++	lockdep_set_class_and_subclass(&dev->addr_list_lock,
++				       &macvlan_netdev_addr_lock_key,
++				       dev->lower_level);
++}
++
+ static int macvlan_init(struct net_device *dev)
+ {
+ 	struct macvlan_dev *vlan = netdev_priv(dev);
+@@ -892,8 +902,7 @@ static int macvlan_init(struct net_device *dev)
+ 	dev->gso_max_size	= lowerdev->gso_max_size;
+ 	dev->gso_max_segs	= lowerdev->gso_max_segs;
+ 	dev->hard_header_len	= lowerdev->hard_header_len;
+-
+-	netdev_lockdep_set_classes(dev);
++	macvlan_set_lockdep_class(dev);
+ 
+ 	vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+ 	if (!vlan->pcpu_stats)
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 5bb448ae6c9c4..47424b2da6437 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -4245,10 +4245,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ 		mod_timer(&vxlan->age_timer, jiffies);
+ 
+ 	netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
+-	if (lowerdev && lowerdev != dst->remote_dev) {
++	if (lowerdev && lowerdev != dst->remote_dev)
+ 		dst->remote_dev = lowerdev;
+-		netdev_update_lockdep_key(lowerdev);
+-	}
+ 	vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
+index aadf3dec5bf32..2ab34cf74ecc3 100644
+--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
++++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
+@@ -3048,6 +3048,7 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
+  * This is a natural nesting, which needs a split lock type.
+  */
+ static struct lock_class_key hostap_netdev_xmit_lock_key;
++static struct lock_class_key hostap_netdev_addr_lock_key;
+ 
+ static void prism2_set_lockdep_class_one(struct net_device *dev,
+ 					 struct netdev_queue *txq,
+@@ -3059,6 +3060,8 @@ static void prism2_set_lockdep_class_one(struct net_device *dev,
+ 
+ static void prism2_set_lockdep_class(struct net_device *dev)
+ {
++	lockdep_set_class(&dev->addr_list_lock,
++			  &hostap_netdev_addr_lock_key);
+ 	netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
+ }
+ 
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 1a96e9c4ec36f..e2825e27ef89d 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1821,8 +1821,6 @@ enum netdev_priv_flags {
+  *			for hardware timestamping
+  *	@sfp_bus:	attached &struct sfp_bus structure.
+  *
+- *	@addr_list_lock_key:	lockdep class annotating
+- *				net_device->addr_list_lock spinlock
+  *	@qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
+  *	@qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+  *
+@@ -2125,7 +2123,6 @@ struct net_device {
+ #endif
+ 	struct phy_device	*phydev;
+ 	struct sfp_bus		*sfp_bus;
+-	struct lock_class_key	addr_list_lock_key;
+ 	struct lock_class_key	*qdisc_tx_busylock;
+ 	struct lock_class_key	*qdisc_running_key;
+ 	bool			proto_down;
+@@ -2217,10 +2214,13 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
+ 	static struct lock_class_key qdisc_tx_busylock_key;	\
+ 	static struct lock_class_key qdisc_running_key;		\
+ 	static struct lock_class_key qdisc_xmit_lock_key;	\
++	static struct lock_class_key dev_addr_list_lock_key;	\
+ 	unsigned int i;						\
+ 								\
+ 	(dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;	\
+ 	(dev)->qdisc_running_key = &qdisc_running_key;		\
++	lockdep_set_class(&(dev)->addr_list_lock,		\
++			  &dev_addr_list_lock_key);		\
+ 	for (i = 0; i < (dev)->num_tx_queues; i++)		\
+ 		lockdep_set_class(&(dev)->_tx[i]._xmit_lock,	\
+ 				  &qdisc_xmit_lock_key);	\
+@@ -3253,7 +3253,6 @@ static inline void netif_stop_queue(struct net_device *dev)
+ }
+ 
+ void netif_tx_stop_all_queues(struct net_device *dev);
+-void netdev_update_lockdep_key(struct net_device *dev);
+ 
+ static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+ {
+@@ -4239,6 +4238,11 @@ static inline void netif_addr_lock(struct net_device *dev)
+ 	spin_lock(&dev->addr_list_lock);
+ }
+ 
++static inline void netif_addr_lock_nested(struct net_device *dev)
++{
++	spin_lock_nested(&dev->addr_list_lock, dev->lower_level);
++}
++
+ static inline void netif_addr_lock_bh(struct net_device *dev)
+ {
+ 	spin_lock_bh(&dev->addr_list_lock);
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index f00bb57f0f600..c8d6a07e23c57 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -494,6 +494,7 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
+  * separate class since they always nest.
+  */
+ static struct lock_class_key vlan_netdev_xmit_lock_key;
++static struct lock_class_key vlan_netdev_addr_lock_key;
+ 
+ static void vlan_dev_set_lockdep_one(struct net_device *dev,
+ 				     struct netdev_queue *txq,
+@@ -502,8 +503,11 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev,
+ 	lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
+ }
+ 
+-static void vlan_dev_set_lockdep_class(struct net_device *dev)
++static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+ {
++	lockdep_set_class_and_subclass(&dev->addr_list_lock,
++				       &vlan_netdev_addr_lock_key,
++				       subclass);
+ 	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
+ }
+ 
+@@ -597,7 +601,7 @@ static int vlan_dev_init(struct net_device *dev)
+ 
+ 	SET_NETDEV_DEVTYPE(dev, &vlan_type);
+ 
+-	vlan_dev_set_lockdep_class(dev);
++	vlan_dev_set_lockdep_class(dev, dev->lower_level);
+ 
+ 	vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+ 	if (!vlan->vlan_pcpu_stats)
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 0ddd80130ea36..f1f1c86f34193 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -745,6 +745,7 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
+  * separate class since they always nest.
+  */
+ static struct lock_class_key batadv_netdev_xmit_lock_key;
++static struct lock_class_key batadv_netdev_addr_lock_key;
+ 
+ /**
+  * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
+@@ -765,6 +766,7 @@ static void batadv_set_lockdep_class_one(struct net_device *dev,
+  */
+ static void batadv_set_lockdep_class(struct net_device *dev)
+ {
++	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
+ 	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
+ }
+ 
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 8ec1362588af1..8c7b78f8bc230 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -105,6 +105,13 @@ out:
+ 	return NETDEV_TX_OK;
+ }
+ 
++static struct lock_class_key bridge_netdev_addr_lock_key;
++
++static void br_set_lockdep_class(struct net_device *dev)
++{
++	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
++}
++
+ static int br_dev_init(struct net_device *dev)
+ {
+ 	struct net_bridge *br = netdev_priv(dev);
+@@ -143,6 +150,7 @@ static int br_dev_init(struct net_device *dev)
+ 		br_fdb_hash_fini(br);
+ 	}
+ 
++	br_set_lockdep_class(dev);
+ 	return err;
+ }
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 061496a1f640f..6bc2388141f6f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -439,6 +439,7 @@ static const char *const netdev_lock_name[] = {
+ 	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
+ 
+ static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
++static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
+ 
+ static inline unsigned short netdev_lock_pos(unsigned short dev_type)
+ {
+@@ -460,11 +461,25 @@ static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+ 	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
+ 				   netdev_lock_name[i]);
+ }
++
++static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
++{
++	int i;
++
++	i = netdev_lock_pos(dev->type);
++	lockdep_set_class_and_name(&dev->addr_list_lock,
++				   &netdev_addr_lock_key[i],
++				   netdev_lock_name[i]);
++}
+ #else
+ static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+ 						 unsigned short dev_type)
+ {
+ }
++
++static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
++{
++}
+ #endif
+ 
+ /*******************************************************************************
+@@ -9373,15 +9388,6 @@ void netif_tx_stop_all_queues(struct net_device *dev)
+ }
+ EXPORT_SYMBOL(netif_tx_stop_all_queues);
+ 
+-void netdev_update_lockdep_key(struct net_device *dev)
+-{
+-	lockdep_unregister_key(&dev->addr_list_lock_key);
+-	lockdep_register_key(&dev->addr_list_lock_key);
+-
+-	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+-}
+-EXPORT_SYMBOL(netdev_update_lockdep_key);
+-
+ /**
+  *	register_netdevice	- register a network device
+  *	@dev: device to register
+@@ -9420,7 +9426,7 @@ int register_netdevice(struct net_device *dev)
+ 		return ret;
+ 
+ 	spin_lock_init(&dev->addr_list_lock);
+-	lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
++	netdev_set_addr_lockdep_class(dev);
+ 
+ 	ret = dev_get_valid_name(net, dev, dev->name);
+ 	if (ret < 0)
+@@ -9939,8 +9945,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ 
+ 	dev_net_set(dev, &init_net);
+ 
+-	lockdep_register_key(&dev->addr_list_lock_key);
+-
+ 	dev->gso_max_size = GSO_MAX_SIZE;
+ 	dev->gso_max_segs = GSO_MAX_SEGS;
+ 	dev->upper_level = 1;
+@@ -10028,8 +10032,6 @@ void free_netdev(struct net_device *dev)
+ 	free_percpu(dev->xdp_bulkq);
+ 	dev->xdp_bulkq = NULL;
+ 
+-	lockdep_unregister_key(&dev->addr_list_lock_key);
+-
+ 	/*  Compatibility with error handling in drivers */
+ 	if (dev->reg_state == NETREG_UNINITIALIZED) {
+ 		netdev_freemem(dev);
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index 2f949b5a1eb9c..6393ba930097b 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -637,7 +637,7 @@ int dev_uc_sync(struct net_device *to, struct net_device *from)
+ 	if (to->addr_len != from->addr_len)
+ 		return -EINVAL;
+ 
+-	netif_addr_lock(to);
++	netif_addr_lock_nested(to);
+ 	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
+ 	if (!err)
+ 		__dev_set_rx_mode(to);
+@@ -667,7 +667,7 @@ int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
+ 	if (to->addr_len != from->addr_len)
+ 		return -EINVAL;
+ 
+-	netif_addr_lock(to);
++	netif_addr_lock_nested(to);
+ 	err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
+ 	if (!err)
+ 		__dev_set_rx_mode(to);
+@@ -691,7 +691,7 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
+ 		return;
+ 
+ 	netif_addr_lock_bh(from);
+-	netif_addr_lock(to);
++	netif_addr_lock_nested(to);
+ 	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
+ 	__dev_set_rx_mode(to);
+ 	netif_addr_unlock(to);
+@@ -858,7 +858,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
+ 	if (to->addr_len != from->addr_len)
+ 		return -EINVAL;
+ 
+-	netif_addr_lock(to);
++	netif_addr_lock_nested(to);
+ 	err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
+ 	if (!err)
+ 		__dev_set_rx_mode(to);
+@@ -888,7 +888,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
+ 	if (to->addr_len != from->addr_len)
+ 		return -EINVAL;
+ 
+-	netif_addr_lock(to);
++	netif_addr_lock_nested(to);
+ 	err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
+ 	if (!err)
+ 		__dev_set_rx_mode(to);
+@@ -912,7 +912,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
+ 		return;
+ 
+ 	netif_addr_lock_bh(from);
+-	netif_addr_lock(to);
++	netif_addr_lock_nested(to);
+ 	__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
+ 	__dev_set_rx_mode(to);
+ 	netif_addr_unlock(to);
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 2269199c58910..9aedc15736adf 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2462,7 +2462,6 @@ static int do_set_master(struct net_device *dev, int ifindex,
+ 			err = ops->ndo_del_slave(upper_dev, dev);
+ 			if (err)
+ 				return err;
+-			netdev_update_lockdep_key(dev);
+ 		} else {
+ 			return -EOPNOTSUPP;
+ 		}
+diff --git a/net/dsa/master.c b/net/dsa/master.c
+index a621367c6e8c2..480a61460c239 100644
+--- a/net/dsa/master.c
++++ b/net/dsa/master.c
+@@ -327,6 +327,8 @@ static void dsa_master_reset_mtu(struct net_device *dev)
+ 	rtnl_unlock();
+ }
+ 
++static struct lock_class_key dsa_master_addr_list_lock_key;
++
+ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+ {
+ 	int ret;
+@@ -345,6 +347,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
+ 	wmb();
+ 
+ 	dev->dsa_ptr = cpu_dp;
++	lockdep_set_class(&dev->addr_list_lock,
++			  &dsa_master_addr_list_lock_key);
+ 	ret = dsa_master_ethtool_setup(dev);
+ 	if (ret)
+ 		return ret;
+diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
+index eccc7d366e17f..f90ef6934b8f4 100644
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -70,6 +70,7 @@ static const struct proto_ops nr_proto_ops;
+  * separate class since they always nest.
+  */
+ static struct lock_class_key nr_netdev_xmit_lock_key;
++static struct lock_class_key nr_netdev_addr_lock_key;
+ 
+ static void nr_set_lockdep_one(struct net_device *dev,
+ 			       struct netdev_queue *txq,
+@@ -80,6 +81,7 @@ static void nr_set_lockdep_one(struct net_device *dev,
+ 
+ static void nr_set_lockdep_key(struct net_device *dev)
+ {
++	lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
+ 	netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
+ }
+ 
+diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
+index e7a872207b464..ce85656ac9c15 100644
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -71,6 +71,7 @@ ax25_address rose_callsign;
+  * separate class since they always nest.
+  */
+ static struct lock_class_key rose_netdev_xmit_lock_key;
++static struct lock_class_key rose_netdev_addr_lock_key;
+ 
+ static void rose_set_lockdep_one(struct net_device *dev,
+ 				 struct netdev_queue *txq,
+@@ -81,6 +82,7 @@ static void rose_set_lockdep_one(struct net_device *dev,
+ 
+ static void rose_set_lockdep_key(struct net_device *dev)
+ {
++	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
+ 	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
+ }
+ 
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1780-v5.18-net-get-rid-of-lockdep_set_class_and_subclass.patch b/target/linux/mediatek/patches-5.4/999-1780-v5.18-net-get-rid-of-lockdep_set_class_and_subclass.patch
new file mode 100644
index 0000000..db820ea
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1780-v5.18-net-get-rid-of-lockdep_set_class_and_subclass.patch
@@ -0,0 +1,88 @@
+From be74294ffa24f5fbc0d6643842e3e095447e17a2 Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Fri, 26 Jun 2020 11:24:22 -0700
+Subject: net: get rid of lockdep_set_class_and_subclass()
+
+lockdep_set_class_and_subclass() is meant to reduce
+the _nested() annotations by assigning a default subclass.
+For addr_list_lock, we have to compute the subclass at
+run-time as the netdevice topology changes after creation.
+
+So, we should just get rid of these
+lockdep_set_class_and_subclass() and stick with our _nested()
+annotations.
+
+Fixes: 845e0ebb4408 ("net: change addr_list_lock back to static key")
+Suggested-by: Taehee Yoo <ap420073@gmail.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c  | 5 ++---
+ drivers/net/macvlan.c | 5 ++---
+ net/8021q/vlan_dev.c  | 9 ++++-----
+ 3 files changed, 8 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index e56547bfdac9a..9159846b8b938 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -4052,9 +4052,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
+ 		return err;
+ 
+ 	netdev_lockdep_set_classes(dev);
+-	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+-				       &macsec_netdev_addr_lock_key,
+-				       dev->lower_level);
++	lockdep_set_class(&dev->addr_list_lock,
++			  &macsec_netdev_addr_lock_key);
+ 
+ 	err = netdev_upper_dev_link(real_dev, dev, extack);
+ 	if (err < 0)
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 6a6cc9f753075..4942f6112e51f 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -880,9 +880,8 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
+ static void macvlan_set_lockdep_class(struct net_device *dev)
+ {
+ 	netdev_lockdep_set_classes(dev);
+-	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+-				       &macvlan_netdev_addr_lock_key,
+-				       dev->lower_level);
++	lockdep_set_class(&dev->addr_list_lock,
++			  &macvlan_netdev_addr_lock_key);
+ }
+ 
+ static int macvlan_init(struct net_device *dev)
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index c8d6a07e23c57..3dd7c972677be 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -503,11 +503,10 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev,
+ 	lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
+ }
+ 
+-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
++static void vlan_dev_set_lockdep_class(struct net_device *dev)
+ {
+-	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+-				       &vlan_netdev_addr_lock_key,
+-				       subclass);
++	lockdep_set_class(&dev->addr_list_lock,
++			  &vlan_netdev_addr_lock_key);
+ 	netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
+ }
+ 
+@@ -601,7 +600,7 @@ static int vlan_dev_init(struct net_device *dev)
+ 
+ 	SET_NETDEV_DEVTYPE(dev, &vlan_type);
+ 
+-	vlan_dev_set_lockdep_class(dev, dev->lower_level);
++	vlan_dev_set_lockdep_class(dev);
+ 
+ 	vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
+ 	if (!vlan->vlan_pcpu_stats)
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1781-v5.18-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch b/target/linux/mediatek/patches-5.4/999-1781-v5.18-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch
new file mode 100644
index 0000000..27e4e8e
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1781-v5.18-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch
@@ -0,0 +1,50 @@
+From bc0435855041d7fff0b83dd992fc4be34aa11afb Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Tue, 18 Aug 2020 10:17:32 +0200
+Subject: netlink: consistently use NLA_POLICY_MIN_LEN()
+
+Change places that open-code NLA_POLICY_MIN_LEN() to
+use the macro instead, giving us flexibility in how we
+handle the details of the macro.
+
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c            | 2 +-
+ drivers/net/wireguard/netlink.c | 4 ++--
+ net/wireless/nl80211.c          | 6 +++---
+ 3 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1608,7 +1608,7 @@ static const struct nla_policy macsec_ge
+ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
+ 	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
+ 	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
+-	[MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
++	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
+ 	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
+ 				   .len = MACSEC_KEYID_LEN, },
+ 	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -676,7 +676,7 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WO
+ 	},
+ 	[NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 },
+ 	[NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 },
+-	[NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
++	[NL80211_WOWLAN_TCP_DATA_PAYLOAD] = NLA_POLICY_MIN_LEN(1),
+ 	[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = {
+ 		.len = sizeof(struct nl80211_wowlan_tcp_data_seq)
+ 	},
+@@ -684,8 +684,8 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WO
+ 		.len = sizeof(struct nl80211_wowlan_tcp_data_token)
+ 	},
+ 	[NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 },
+-	[NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
+-	[NL80211_WOWLAN_TCP_WAKE_MASK] = { .type = NLA_MIN_LEN, .len = 1 },
++	[NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = NLA_POLICY_MIN_LEN(1),
++	[NL80211_WOWLAN_TCP_WAKE_MASK] = NLA_POLICY_MIN_LEN(1),
+ };
+ #endif /* CONFIG_PM */
+ 
diff --git a/target/linux/mediatek/patches-5.4/999-1782-v5.18-net-macsec-Add-missing-documentation-for-gro_cells.patch b/target/linux/mediatek/patches-5.4/999-1782-v5.18-net-macsec-Add-missing-documentation-for-gro_cells.patch
new file mode 100644
index 0000000..7d781e7
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1782-v5.18-net-macsec-Add-missing-documentation-for-gro_cells.patch
@@ -0,0 +1,31 @@
+From ecdc5689d93eab429f7a48ae058b7c516a4a3c95 Mon Sep 17 00:00:00 2001
+From: Lee Jones <lee.jones@linaro.org>
+Date: Mon, 2 Nov 2020 11:45:07 +0000
+Subject: net: macsec: Add missing documentation for 'gro_cells'
+
+Fixes the following W=1 kernel build warning(s):
+
+ drivers/net/macsec.c:113: warning: Function parameter or member 'gro_cells' not described in 'macsec_dev'
+
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Link: https://lore.kernel.org/r/20201102114512.1062724-26-lee.jones@linaro.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/macsec.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 11ca5fa902a16..92425e1fd70c0 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -101,6 +101,7 @@ struct pcpu_secy_stats {
+  * @real_dev: pointer to underlying netdevice
+  * @stats: MACsec device stats
+  * @secys: linked list of SecY's on the underlying device
++ * @gro_cells: pointer to the Generic Receive Offload cell
+  * @offload: status of offloading on the MACsec device
+  */
+ struct macsec_dev {
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1783-v5.18-net-macsec-fix-the-length-used-to-copy-the-key-for-offloading.patch b/target/linux/mediatek/patches-5.4/999-1783-v5.18-net-macsec-fix-the-length-used-to-copy-the-key-for-offloading.patch
new file mode 100644
index 0000000..ed68254
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1783-v5.18-net-macsec-fix-the-length-used-to-copy-the-key-for-offloading.patch
@@ -0,0 +1,60 @@
+From 1f7fe5121127e037b86592ba42ce36515ea0e3f7 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <atenart@kernel.org>
+Date: Thu, 24 Jun 2021 11:38:28 +0200
+Subject: net: macsec: fix the length used to copy the key for offloading
+
+The key length used when offloading macsec to Ethernet or PHY drivers
+was set to MACSEC_KEYID_LEN (16), which is an issue as:
+- This was never meant to be the key length.
+- The key length can be > 16.
+
+Fix this by using MACSEC_MAX_KEY_LEN to store the key (the max length
+accepted in uAPI) and secy->key_len to copy it.
+
+Fixes: 3cf3227a21d1 ("net: macsec: hardware offloading infrastructure")
+Reported-by: Lior Nahmanson <liorna@nvidia.com>
+Signed-off-by: Antoine Tenart <atenart@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c | 4 ++--
+ include/net/macsec.h | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 92425e1fd70c0..93dc48b9b4f24 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1819,7 +1819,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
+ 		ctx.sa.rx_sa = rx_sa;
+ 		ctx.secy = secy;
+ 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+-		       MACSEC_KEYID_LEN);
++		       secy->key_len);
+ 
+ 		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+ 		if (err)
+@@ -2061,7 +2061,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
+ 		ctx.sa.tx_sa = tx_sa;
+ 		ctx.secy = secy;
+ 		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+-		       MACSEC_KEYID_LEN);
++		       secy->key_len);
+ 
+ 		err = macsec_offload(ops->mdo_add_txsa, &ctx);
+ 		if (err)
+diff --git a/include/net/macsec.h b/include/net/macsec.h
+index 52874cdfe2260..d6fa6b97f6efa 100644
+--- a/include/net/macsec.h
++++ b/include/net/macsec.h
+@@ -241,7 +241,7 @@ struct macsec_context {
+ 	struct macsec_rx_sc *rx_sc;
+ 	struct {
+ 		unsigned char assoc_num;
+-		u8 key[MACSEC_KEYID_LEN];
++		u8 key[MACSEC_MAX_KEY_LEN];
+ 		union {
+ 			struct macsec_rx_sa *rx_sa;
+ 			struct macsec_tx_sa *tx_sa;
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1784-v5.18-net-drivers-get-ready-for-const-netdev-dev_addr.patch b/target/linux/mediatek/patches-5.4/999-1784-v5.18-net-drivers-get-ready-for-const-netdev-dev_addr.patch
new file mode 100644
index 0000000..ad3bd2f
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1784-v5.18-net-drivers-get-ready-for-const-netdev-dev_addr.patch
@@ -0,0 +1,73 @@
+From 8bc7823ed3bd5b87765e1b3d6f72c69624680921 Mon Sep 17 00:00:00 2001
+From: Jakub Kicinski <kuba@kernel.org>
+Date: Fri, 22 Oct 2021 16:21:02 -0700
+Subject: net: drivers: get ready for const netdev->dev_addr
+
+Commit 406f42fa0d3c ("net-next: When a bond have a massive amount
+of VLANs...") introduced a rbtree for faster Ethernet address look
+up. To maintain netdev->dev_addr in this tree we need to make all
+the writes to it go through appropriate helpers. We will make
+netdev->dev_addr a const.
+
+Make sure local references to netdev->dev_addr are constant.
+
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/macsec.c              | 2 +-
+ drivers/net/macvlan.c             | 3 ++-
+ drivers/net/vmxnet3/vmxnet3_drv.c | 4 ++--
+ 3 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 18b6dba9394e8..16aa3a478e9e8 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -250,7 +250,7 @@ static bool send_sci(const struct macsec_secy *secy)
+ 		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
+ }
+ 
+-static sci_t make_sci(u8 *addr, __be16 port)
++static sci_t make_sci(const u8 *addr, __be16 port)
+ {
+ 	sci_t sci;
+ 
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 6189acb33973c..d2f830ec2969c 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -698,7 +698,8 @@ hash_del:
+ 	return 0;
+ }
+ 
+-static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
++static int macvlan_sync_address(struct net_device *dev,
++				const unsigned char *addr)
+ {
+ 	struct macvlan_dev *vlan = netdev_priv(dev);
+ 	struct net_device *lowerdev = vlan->lowerdev;
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 7a205ddf0060a..3e1b7746cce44 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
+ static int enable_mq = 1;
+ 
+ static void
+-vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
++vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
+ 
+ /*
+  *    Enable/Disable the given intr
+@@ -2806,7 +2806,7 @@ vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
+ 
+ 
+ static void
+-vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
++vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
+ {
+ 	u32 tmp;
+ 
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1785-v5.18-net-macsec-Fix-offload-support-for-NETDEV_UNREGISTER-event.patch b/target/linux/mediatek/patches-5.4/999-1785-v5.18-net-macsec-Fix-offload-support-for-NETDEV_UNREGISTER-event.patch
new file mode 100644
index 0000000..b3e5930
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1785-v5.18-net-macsec-Fix-offload-support-for-NETDEV_UNREGISTER-event.patch
@@ -0,0 +1,70 @@
+From 9cef24c8b76c1f6effe499d2f131807c90f7ce9a Mon Sep 17 00:00:00 2001
+From: Lior Nahmanson <liorna@nvidia.com>
+Date: Sun, 30 Jan 2022 13:29:01 +0200
+Subject: net: macsec: Fix offload support for NETDEV_UNREGISTER event
+
+Current macsec netdev notify handler handles NETDEV_UNREGISTER event by
+releasing relevant SW resources only; this causes a resource leak in case
+of macsec HW offload, as the underlay driver was not notified to clean
+its macsec offload resources.
+
+Fix by calling the underlay driver to clean its relevant resources
+by moving offload handling from macsec_dellink() to macsec_common_dellink()
+when handling NETDEV_UNREGISTER event.
+
+Fixes: 3cf3227a21d1 ("net: macsec: hardware offloading infrastructure")
+Signed-off-by: Lior Nahmanson <liorna@nvidia.com>
+Reviewed-by: Raed Salem <raeds@nvidia.com>
+Signed-off-by: Raed Salem <raeds@nvidia.com>
+Reviewed-by: Antoine Tenart <atenart@kernel.org>
+Link: https://lore.kernel.org/r/1643542141-28956-1-git-send-email-raeds@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/macsec.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 16aa3a478e9e8..33ff33c05aabc 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -3870,6 +3870,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head
+ 	struct macsec_dev *macsec = macsec_priv(dev);
+ 	struct net_device *real_dev = macsec->real_dev;
+ 
++	/* If h/w offloading is available, propagate to the device */
++	if (macsec_is_offloaded(macsec)) {
++		const struct macsec_ops *ops;
++		struct macsec_context ctx;
++
++		ops = macsec_get_ops(netdev_priv(dev), &ctx);
++		if (ops) {
++			ctx.secy = &macsec->secy;
++			macsec_offload(ops->mdo_del_secy, &ctx);
++		}
++	}
++
+ 	unregister_netdevice_queue(dev, head);
+ 	list_del_rcu(&macsec->secys);
+ 	macsec_del_dev(macsec);
+@@ -3884,18 +3896,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
+ 	struct net_device *real_dev = macsec->real_dev;
+ 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+ 
+-	/* If h/w offloading is available, propagate to the device */
+-	if (macsec_is_offloaded(macsec)) {
+-		const struct macsec_ops *ops;
+-		struct macsec_context ctx;
+-
+-		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+-		if (ops) {
+-			ctx.secy = &macsec->secy;
+-			macsec_offload(ops->mdo_del_secy, &ctx);
+-		}
+-	}
+-
+ 	macsec_common_dellink(dev, head);
+ 
+ 	if (list_empty(&rxd->secys)) {
+-- 
+cgit 1.2.3-1.el7
+
diff --git a/target/linux/mediatek/patches-5.4/999-1786-v5.18-net-macsec-Verify-that-send_sci-is-on-when-setting-Tx-sci-explicitly.patch b/target/linux/mediatek/patches-5.4/999-1786-v5.18-net-macsec-Verify-that-send_sci-is-on-when-setting-Tx-sci-explicitly.patch
new file mode 100644
index 0000000..bc78ff3
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-1786-v5.18-net-macsec-Verify-that-send_sci-is-on-when-setting-Tx-sci-explicitly.patch
@@ -0,0 +1,43 @@
+From d0cfa548dbde354de986911d3913897b5448faad Mon Sep 17 00:00:00 2001
+From: Lior Nahmanson <liorna@nvidia.com>
+Date: Sun, 30 Jan 2022 13:37:52 +0200
+Subject: net: macsec: Verify that send_sci is on when setting Tx sci
+ explicitly
+
+When the Tx sci is set explicitly, the Rx side is expected to use this
+sci and not recalculate it from the packet. However, when the Tx sci
+is explicit and send_sci is off, the receiver wrongly recalculates
+the sci from the source MAC address, which will most likely differ
+from the explicit sci.
+
+Fix by preventing such configuration when macsec newlink is established
+and return EINVAL error code on such cases.
+
+Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver")
+Signed-off-by: Lior Nahmanson <liorna@nvidia.com>
+Reviewed-by: Raed Salem <raeds@nvidia.com>
+Signed-off-by: Raed Salem <raeds@nvidia.com>
+Link: https://lore.kernel.org/r/1643542672-29403-1-git-send-email-raeds@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ drivers/net/macsec.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -4047,6 +4047,15 @@ static int macsec_newlink(struct net *ne
+ 	    !macsec_check_offload(macsec->offload, macsec))
+ 		return -EOPNOTSUPP;
+ 
++	/* send_sci must be set to true when transmit sci explicitly is set */
++	if ((data && data[IFLA_MACSEC_SCI]) &&
++	    (data && data[IFLA_MACSEC_INC_SCI])) {
++		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
++
++		if (!send_sci)
++			return -EINVAL;
++	}
++
+ 	if (data && data[IFLA_MACSEC_ICV_LEN])
+ 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
+ 	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);