[][MAC80211][Add Bridger related patches]

[Description]
Add Bridger-related patches.

[Release-log]
N/A

Change-Id: I62a66ef99dc14ef2a86598a5bb251c0168e4cdd7
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/6295961
diff --git a/autobuild_mac80211_release/0010-add-llvm_bpf-toolchain.patch b/autobuild_mac80211_release/0010-add-llvm_bpf-toolchain.patch
new file mode 100644
index 0000000..143c03e
--- /dev/null
+++ b/autobuild_mac80211_release/0010-add-llvm_bpf-toolchain.patch
@@ -0,0 +1,134 @@
+diff --git a/target/Makefile b/target/Makefile
+index 7ad26c71..83f9c4b0 100644
+--- a/target/Makefile
++++ b/target/Makefile
+@@ -7,9 +7,14 @@
+ curdir:=target
+ 
+ $(curdir)/subtargets:=install
+-$(curdir)/builddirs:=linux sdk imagebuilder toolchain
++$(curdir)/builddirs:=linux sdk imagebuilder toolchain llvm-bpf
+ $(curdir)/builddirs-default:=linux
+-$(curdir)/builddirs-install:=linux $(if $(CONFIG_SDK),sdk) $(if $(CONFIG_IB),imagebuilder) $(if $(CONFIG_MAKE_TOOLCHAIN),toolchain)
++$(curdir)/builddirs-install:=\
++	linux \
++	$(if $(CONFIG_SDK),sdk) \
++	$(if $(CONFIG_IB),imagebuilder) \
++	$(if $(CONFIG_MAKE_TOOLCHAIN),toolchain) \
++	$(if $(CONFIG_SDK_LLVM_BPF),llvm-bpf)
+ 
+ $(curdir)/sdk/install:=$(curdir)/linux/install
+ $(curdir)/imagebuilder/install:=$(curdir)/linux/install
+diff --git a/toolchain/Config.in b/toolchain/Config.in
+index 6dda9af9..221e25f2 100644
+--- a/toolchain/Config.in
++++ b/toolchain/Config.in
+@@ -37,6 +37,39 @@ menuconfig TARGET_OPTIONS
+ 
+ 		  Most people will answer N.
+ 
++	choice BPF_TOOLCHAIN
++		prompt "BPF toolchain" if DEVEL
++		default BPF_TOOLCHAIN_BUILD_LLVM if BUILDBOT
++		default BPF_TOOLCHAIN_PREBUILT if HAS_PREBUILT_LLVM_TOOLCHAIN
++		default BPF_TOOLCHAIN_NONE
++
++		config BPF_TOOLCHAIN_NONE
++			bool "None"
++
++		config BPF_TOOLCHAIN_PREBUILT
++			bool "Use prebuilt LLVM toolchain"
++			depends on HAS_PREBUILT_LLVM_TOOLCHAIN
++			select USE_LLVM_PREBUILT
++
++		config BPF_TOOLCHAIN_HOST
++			select USE_LLVM_HOST
++			bool "Use host LLVM toolchain"
++
++		config BPF_TOOLCHAIN_BUILD_LLVM
++			select USE_LLVM_BUILD
++			bool "Build LLVM toolchain for eBPF"
++			help
++			  If enabled, a LLVM toolchain for building eBPF binaries will be built.
++			  If this is not enabled, eBPF packages can only be built if the host
++			  has a suitable toolchain
++	endchoice
++
++	config BPF_TOOLCHAIN_HOST_PATH
++		string
++		depends on BPF_TOOLCHAIN_HOST
++		prompt "Host LLVM toolchain path (prefix)" if DEVEL
++		default "/usr/local/opt/llvm" if HOST_OS_MACOS
++		default ""
+ 
+ menuconfig EXTERNAL_TOOLCHAIN
+ 	bool
+@@ -259,6 +292,26 @@ config GDB
+ 	help
+ 	  Enable if you want to build the gdb.
+ 
++config HAS_BPF_TOOLCHAIN
++	bool
++
++config HAS_PREBUILT_LLVM_TOOLCHAIN
++	def_bool $(shell, [ -f llvm-bpf/.llvm-version ] && echo y || echo n)
++
++config USE_LLVM_HOST
++	select HAS_BPF_TOOLCHAIN
++	bool
++
++config USE_LLVM_PREBUILT
++	select HAS_BPF_TOOLCHAIN
++	default y if !DEVEL && !BUILDBOT && HAS_PREBUILT_LLVM_TOOLCHAIN
++	bool
++
++config USE_LLVM_BUILD
++	default y if !DEVEL && BUILDBOT
++	select HAS_BPF_TOOLCHAIN
++	bool
++
+ config USE_GLIBC
+ 	default y if !TOOLCHAINOPTS && !EXTERNAL_TOOLCHAIN && !NATIVE_TOOLCHAIN && (arc)
+ 	bool
+diff --git a/tools/Makefile b/tools/Makefile
+index 29309ec4..84f984dd 100644
+--- a/tools/Makefile
++++ b/tools/Makefile
+@@ -37,6 +37,7 @@ tools-$(CONFIG_TARGET_mxs) += elftosb sdimage
+ tools-$(CONFIG_TARGET_tegra) += cbootimage cbootimage-configs
+ tools-$(CONFIG_USES_MINOR) += kernel2minor
+ tools-$(CONFIG_USE_SPARSE) += sparse
++tools-$(CONFIG_USE_LLVM_BUILD) += llvm-bpf
+ 
+ # builddir dependencies
+ $(curdir)/autoconf/compile := $(curdir)/m4/compile
+@@ -59,6 +60,7 @@ $(curdir)/libelf/compile := $(curdir)/libtool/compile
+ $(curdir)/libressl/compile := $(curdir)/pkgconf/compile
+ $(curdir)/libtool/compile := $(curdir)/m4/compile $(curdir)/autoconf/compile $(curdir)/automake/compile $(curdir)/missing-macros/compile
+ $(curdir)/lzma-old/compile := $(curdir)/zlib/compile
++$(curdir)/llvm-bpf/compile := $(curdir)/cmake/compile
+ $(curdir)/make-ext4fs/compile := $(curdir)/zlib/compile
+ $(curdir)/missing-macros/compile := $(curdir)/autoconf/compile
+ $(curdir)/mkimage/compile += $(curdir)/libressl/compile
+diff --git a/tools/llvm-bpf/Makefile b/tools/llvm-bpf/Makefile
+index 527b3578..53b7347a 100644
+--- a/tools/llvm-bpf/Makefile
++++ b/tools/llvm-bpf/Makefile
+@@ -26,7 +26,7 @@ include $(INCLUDE_DIR)/cmake.mk
+ 
+ LLVM_BPF_PREFIX = llvm-bpf-$(PKG_VERSION).$(HOST_OS)-$(HOST_ARCH)
+ 
+-CMAKE_HOST_INSTALL_PREFIX = $(STAGING_DIR_HOST)/$(LLVM_BPF_PREFIX)
++HOST_BUILD_PREFIX = $(STAGING_DIR_HOST)/$(LLVM_BPF_PREFIX)
+ 
+ CMAKE_HOST_OPTIONS += \
+ 	-DLLVM_ENABLE_BINDINGS=OFF \
+@@ -46,7 +46,7 @@ define Host/Install
+ 	$(Host/Install/Default)
+ 	ln -s $(LLVM_BPF_PREFIX) $(STAGING_DIR_HOST)/llvm-bpf
+ 	STRIP_KMOD= PATCHELF= STRIP=strip $(SCRIPT_DIR)/rstrip.sh $(STAGING_DIR_HOST)/llvm-bpf
+-	echo "$(PKG_VERSION)" > $(CMAKE_HOST_INSTALL_PREFIX)/.llvm-version
++	echo "$(PKG_VERSION)" > $(HOST_BUILD_PREFIX)/.llvm-version
+ endef
+ 
+ define Host/Uninstall
diff --git a/autobuild_mac80211_release/999-update-uapi-header-files-for-bridger.patch b/autobuild_mac80211_release/999-update-uapi-header-files-for-bridger.patch
new file mode 100644
index 0000000..107cf49
--- /dev/null
+++ b/autobuild_mac80211_release/999-update-uapi-header-files-for-bridger.patch
@@ -0,0 +1,194 @@
+diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
+index 1b3c2b6..00bbbf8 100644
+--- a/include/uapi/linux/if_bridge.h
++++ b/include/uapi/linux/if_bridge.h
+@@ -130,6 +130,7 @@ enum {
+ #define BRIDGE_VLAN_INFO_RANGE_BEGIN	(1<<3) /* VLAN is start of vlan range */
+ #define BRIDGE_VLAN_INFO_RANGE_END	(1<<4) /* VLAN is end of vlan range */
+ #define BRIDGE_VLAN_INFO_BRENTRY	(1<<5) /* Global bridge VLAN entry */
++#define BRIDGE_VLAN_INFO_ONLY_OPTS	(1<<6) /* Skip create/delete/flags */
+ 
+ struct bridge_vlan_info {
+ 	__u16 flags;
+@@ -156,6 +157,112 @@ struct bridge_vlan_xstats {
+ 	__u32 pad2;
+ };
+ 
++/* Bridge vlan RTM header */
++struct br_vlan_msg {
++	__u8 family;
++	__u8 reserved1;
++	__u16 reserved2;
++	__u32 ifindex;
++};
++
++enum {
++	BRIDGE_VLANDB_DUMP_UNSPEC,
++	BRIDGE_VLANDB_DUMP_FLAGS,
++	__BRIDGE_VLANDB_DUMP_MAX,
++};
++#define BRIDGE_VLANDB_DUMP_MAX (__BRIDGE_VLANDB_DUMP_MAX - 1)
++
++/* flags used in BRIDGE_VLANDB_DUMP_FLAGS attribute to affect dumps */
++#define BRIDGE_VLANDB_DUMPF_STATS	(1 << 0) /* Include stats in the dump */
++#define BRIDGE_VLANDB_DUMPF_GLOBAL	(1 << 1) /* Dump global vlan options only */
++
++/* Bridge vlan RTM attributes
++ * [BRIDGE_VLANDB_ENTRY] = {
++ *     [BRIDGE_VLANDB_ENTRY_INFO]
++ *     ...
++ * }
++ * [BRIDGE_VLANDB_GLOBAL_OPTIONS] = {
++ *     [BRIDGE_VLANDB_GOPTS_ID]
++ *     ...
++ * }
++ */
++enum {
++	BRIDGE_VLANDB_UNSPEC,
++	BRIDGE_VLANDB_ENTRY,
++	BRIDGE_VLANDB_GLOBAL_OPTIONS,
++	__BRIDGE_VLANDB_MAX,
++};
++#define BRIDGE_VLANDB_MAX (__BRIDGE_VLANDB_MAX - 1)
++
++enum {
++	BRIDGE_VLANDB_ENTRY_UNSPEC,
++	BRIDGE_VLANDB_ENTRY_INFO,
++	BRIDGE_VLANDB_ENTRY_RANGE,
++	BRIDGE_VLANDB_ENTRY_STATE,
++	BRIDGE_VLANDB_ENTRY_TUNNEL_INFO,
++	BRIDGE_VLANDB_ENTRY_STATS,
++	BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
++	__BRIDGE_VLANDB_ENTRY_MAX,
++};
++#define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
++
++/* [BRIDGE_VLANDB_ENTRY] = {
++ *     [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = {
++ *         [BRIDGE_VLANDB_TINFO_ID]
++ *         ...
++ *     }
++ * }
++ */
++enum {
++	BRIDGE_VLANDB_TINFO_UNSPEC,
++	BRIDGE_VLANDB_TINFO_ID,
++	BRIDGE_VLANDB_TINFO_CMD,
++	__BRIDGE_VLANDB_TINFO_MAX,
++};
++#define BRIDGE_VLANDB_TINFO_MAX (__BRIDGE_VLANDB_TINFO_MAX - 1)
++
++/* [BRIDGE_VLANDB_ENTRY] = {
++ *     [BRIDGE_VLANDB_ENTRY_STATS] = {
++ *         [BRIDGE_VLANDB_STATS_RX_BYTES]
++ *         ...
++ *     }
++ *     ...
++ * }
++ */
++enum {
++	BRIDGE_VLANDB_STATS_UNSPEC,
++	BRIDGE_VLANDB_STATS_RX_BYTES,
++	BRIDGE_VLANDB_STATS_RX_PACKETS,
++	BRIDGE_VLANDB_STATS_TX_BYTES,
++	BRIDGE_VLANDB_STATS_TX_PACKETS,
++	BRIDGE_VLANDB_STATS_PAD,
++	__BRIDGE_VLANDB_STATS_MAX,
++};
++#define BRIDGE_VLANDB_STATS_MAX (__BRIDGE_VLANDB_STATS_MAX - 1)
++
++enum {
++	BRIDGE_VLANDB_GOPTS_UNSPEC,
++	BRIDGE_VLANDB_GOPTS_ID,
++	BRIDGE_VLANDB_GOPTS_RANGE,
++	BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING,
++	BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION,
++	BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
++	BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT,
++	BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT,
++	BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
++	BRIDGE_VLANDB_GOPTS_PAD,
++	BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
++	BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
++	BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
++	BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
++	BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
++	BRIDGE_VLANDB_GOPTS_MCAST_QUERIER,
++	BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS,
++	BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE,
++	__BRIDGE_VLANDB_GOPTS_MAX
++};
++#define BRIDGE_VLANDB_GOPTS_MAX (__BRIDGE_VLANDB_GOPTS_MAX - 1)
++
+ /* Bridge multicast database attributes
+  * [MDBA_MDB] = {
+  *     [MDBA_MDB_ENTRY] = {
+diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
+index a6aa466..6abd5a1 100644
+--- a/include/uapi/linux/pkt_cls.h
++++ b/include/uapi/linux/pkt_cls.h
+@@ -16,9 +16,37 @@ enum {
+ 	TCA_ACT_STATS,
+ 	TCA_ACT_PAD,
+ 	TCA_ACT_COOKIE,
++	TCA_ACT_FLAGS,
++	TCA_ACT_HW_STATS,
++	TCA_ACT_USED_HW_STATS,
+ 	__TCA_ACT_MAX
+ };
+ 
++/* See other TCA_ACT_FLAGS_ * flags in include/net/act_api.h. */
++#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
++					 * actions stats.
++					 */
++
++/* tca HW stats type
++ * When user does not pass the attribute, he does not care.
++ * It is the same as if he would pass the attribute with
++ * all supported bits set.
++ * In case no bits are set, user is not interested in getting any HW statistics.
++ */
++#define TCA_ACT_HW_STATS_IMMEDIATE (1 << 0) /* Means that in dump, user
++					     * gets the current HW stats
++					     * state from the device
++					     * queried at the dump time.
++					     */
++#define TCA_ACT_HW_STATS_DELAYED (1 << 1) /* Means that in dump, user gets
++					   * HW stats that might be out of date
++					   * for some time, maybe couple of
++					   * seconds. This is the case when
++					   * driver polls stats updates
++					   * periodically or when it gets async
++					   * stats update from the device.
++					   */
++
+ #define TCA_ACT_MAX __TCA_ACT_MAX
+ #define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
+ #define TCA_ACT_MAX_PRIO 32
+diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
+index 96eca6e..ff43cb9 100644
+--- a/include/uapi/linux/rtnetlink.h
++++ b/include/uapi/linux/rtnetlink.h
+@@ -164,6 +164,13 @@ enum {
+ 	RTM_GETNEXTHOP,
+ #define RTM_GETNEXTHOP	RTM_GETNEXTHOP
+ 
++	RTM_NEWVLAN = 112,
+#define RTM_NEWVLAN	RTM_NEWVLAN
++	RTM_DELVLAN,
++#define RTM_DELVLAN	RTM_DELVLAN
++	RTM_GETVLAN,
++#define RTM_GETVLAN	RTM_GETVLAN
++
+ 	__RTM_MAX,
+ #define RTM_MAX		(((__RTM_MAX + 3) & ~3) - 1)
+ };
+@@ -717,6 +724,8 @@ enum rtnetlink_groups {
+ #define RTNLGRP_IPV6_MROUTE_R	RTNLGRP_IPV6_MROUTE_R
+ 	RTNLGRP_NEXTHOP,
+ #define RTNLGRP_NEXTHOP		RTNLGRP_NEXTHOP
++	RTNLGRP_BRVLAN,
++#define RTNLGRP_BRVLAN		RTNLGRP_BRVLAN
+ 	__RTNLGRP_MAX
+ };
+ #define RTNLGRP_MAX	(__RTNLGRP_MAX - 1)
diff --git a/autobuild_mac80211_release/target/linux/mediatek/patches-5.4/9999-update-net-bridge-for-bridger.patch b/autobuild_mac80211_release/target/linux/mediatek/patches-5.4/9999-update-net-bridge-for-bridger.patch
new file mode 100644
index 0000000..6fe3733
--- /dev/null
+++ b/autobuild_mac80211_release/target/linux/mediatek/patches-5.4/9999-update-net-bridge-for-bridger.patch
@@ -0,0 +1,1823 @@
+diff --git a/include/net/switchdev.h b/include/net/switchdev.h
+index 191dc34..d4d71d9 100644
+--- a/include/net/switchdev.h
++++ b/include/net/switchdev.h
+@@ -77,6 +77,7 @@ struct switchdev_obj {
+ struct switchdev_obj_port_vlan {
+ 	struct switchdev_obj obj;
+ 	u16 flags;
++	u16 vid;
+ 	u16 vid_begin;
+ 	u16 vid_end;
+ };
+@@ -117,6 +118,7 @@ enum switchdev_notifier_type {
+ struct switchdev_notifier_info {
+ 	struct net_device *dev;
+ 	struct netlink_ext_ack *extack;
++	const void *ctx;
+ };
+ 
+ struct switchdev_notifier_fdb_info {
+diff --git a/net/bridge/Makefile b/net/bridge/Makefile
+index ac9ef33..49da7ae 100644
+--- a/net/bridge/Makefile
++++ b/net/bridge/Makefile
+@@ -20,7 +20,7 @@ obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
+ 
+ bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
+ 
+-bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o
++bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o br_vlan_options.o
+ 
+ bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o
+ 
+diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
+index da5ed4c..eeabfbc 100644
+--- a/net/bridge/br_mdb.c
++++ b/net/bridge/br_mdb.c
+@@ -16,7 +16,37 @@
+ 
+ #include "br_private.h"
+ 
+-static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
++static size_t __br_rports_one_size(void)
++{
++	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
++	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
++	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
++	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
++	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
++	       nla_total_size(sizeof(u32));  /* MDBA_ROUTER_PATTR_VID */
++}
++
++size_t br_rports_size(const struct net_bridge_mcast *brmctx)
++{
++	struct net_bridge_mcast_port *pmctx;
++	size_t size = nla_total_size(0); /* MDBA_ROUTER */
++
++	rcu_read_lock();
++	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
++				 ip4_rlist)
++		size += __br_rports_one_size();
++
++#if IS_ENABLED(CONFIG_IPV6)
++	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
++				 ip6_rlist)
++		size += __br_rports_one_size();
++#endif
++	rcu_read_unlock();
++
++	return size;
++}
++
++int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
+ 			       struct net_device *dev)
+ {
+ 	struct net_bridge *br = netdev_priv(dev);
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index cbcbc19..887e767 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -562,7 +562,7 @@ static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
+ 	return err;
+ }
+ 
+-static int br_process_vlan_info(struct net_bridge *br,
++int br_process_vlan_info(struct net_bridge *br,
+ 				struct net_bridge_port *p, int cmd,
+ 				struct bridge_vlan_info *vinfo_curr,
+ 				struct bridge_vlan_info **vinfo_last,
+@@ -1578,7 +1578,7 @@ static int br_fill_linkxstats(struct sk_buff *skb,
+ 		pvid = br_get_pvid(vg);
+ 		list_for_each_entry(v, &vg->vlan_list, vlist) {
+ 			struct bridge_vlan_xstats vxi;
+-			struct br_vlan_stats stats;
++			struct pcpu_sw_netstats stats;
+ 
+ 			if (++vl_idx < *prividx)
+ 				continue;
+@@ -1652,6 +1652,7 @@ int __init br_netlink_init(void)
+ 	int err;
+ 
+ 	br_mdb_init();
++	br_vlan_rtnl_init();
+ 	rtnl_af_register(&br_af_ops);
+ 
+ 	err = rtnl_link_register(&br_link_ops);
+@@ -1669,6 +1670,7 @@ int __init br_netlink_init(void)
+ void br_netlink_fini(void)
+ {
+ 	br_mdb_uninit();
++	br_vlan_rtnl_uninit();
+ 	rtnl_af_unregister(&br_af_ops);
+ 	rtnl_link_unregister(&br_link_ops);
+ }
+diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c
+index afee292..3bbbd66 100644
+--- a/net/bridge/br_netlink_tunnel.c
++++ b/net/bridge/br_netlink_tunnel.c
+@@ -26,7 +26,7 @@ static size_t __get_vlan_tinfo_size(void)
+ 		  nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_VLAN_TUNNEL_FLAGS */
+ }
+ 
+-static bool vlan_tunid_inrange(struct net_bridge_vlan *v_curr,
++bool vlan_tunid_inrange(struct net_bridge_vlan *v_curr,
+ 			       struct net_bridge_vlan *v_last)
+ {
+ 	__be32 tunid_curr = tunnel_id_to_key32(v_curr->tinfo.tunnel_id);
+@@ -193,7 +193,7 @@ static const struct nla_policy vlan_tunnel_policy[IFLA_BRIDGE_VLAN_TUNNEL_MAX +
+ 	[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS] = { .type = NLA_U16 },
+ };
+ 
+-static int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
++int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
+ 			       u16 vid, u32 tun_id, bool *changed)
+ {
+ 	int err = 0;
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 4bd9e9b..4620f70 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -95,6 +95,60 @@ struct br_vlan_stats {
+ 	struct u64_stats_sync syncp;
+ };
+ 
++/* net_bridge_mcast_port must be always defined due to forwarding stubs */
++struct net_bridge_mcast_port {
++#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
++	struct net_bridge_port		*port;
++	struct net_bridge_vlan		*vlan;
++
++	struct bridge_mcast_own_query	ip4_own_query;
++	struct timer_list		ip4_mc_router_timer;
++	struct hlist_node		ip4_rlist;
++#if IS_ENABLED(CONFIG_IPV6)
++	struct bridge_mcast_own_query	ip6_own_query;
++	struct timer_list		ip6_mc_router_timer;
++	struct hlist_node		ip6_rlist;
++#endif /* IS_ENABLED(CONFIG_IPV6) */
++	unsigned char			multicast_router;
++#endif /* CONFIG_BRIDGE_IGMP_SNOOPING */
++};
++
++/* net_bridge_mcast must be always defined due to forwarding stubs */
++struct net_bridge_mcast {
++#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
++	struct net_bridge		*br;
++	struct net_bridge_vlan		*vlan;
++
++	u32				multicast_last_member_count;
++	u32				multicast_startup_query_count;
++
++	u8				multicast_querier;
++	u8				multicast_igmp_version;
++	u8				multicast_router;
++#if IS_ENABLED(CONFIG_IPV6)
++	u8				multicast_mld_version;
++#endif
++	unsigned long			multicast_last_member_interval;
++	unsigned long			multicast_membership_interval;
++	unsigned long			multicast_querier_interval;
++	unsigned long			multicast_query_interval;
++	unsigned long			multicast_query_response_interval;
++	unsigned long			multicast_startup_query_interval;
++	struct hlist_head		ip4_mc_router_list;
++	struct timer_list		ip4_mc_router_timer;
++	struct bridge_mcast_other_query	ip4_other_query;
++	struct bridge_mcast_own_query	ip4_own_query;
++	struct bridge_mcast_querier	ip4_querier;
++#if IS_ENABLED(CONFIG_IPV6)
++	struct hlist_head		ip6_mc_router_list;
++	struct timer_list		ip6_mc_router_timer;
++	struct bridge_mcast_other_query	ip6_other_query;
++	struct bridge_mcast_own_query	ip6_own_query;
++	struct bridge_mcast_querier	ip6_querier;
++#endif /* IS_ENABLED(CONFIG_IPV6) */
++#endif /* CONFIG_BRIDGE_IGMP_SNOOPING */
++};
++
+ struct br_tunnel_info {
+ 	__be64				tunnel_id;
+ 	struct metadata_dst __rcu	*tunnel_dst;
+@@ -104,6 +158,8 @@ struct br_tunnel_info {
+ enum {
+ 	BR_VLFLAG_PER_PORT_STATS = BIT(0),
+ 	BR_VLFLAG_ADDED_BY_SWITCHDEV = BIT(1),
++	BR_VLFLAG_MCAST_ENABLED = BIT(2),
++	BR_VLFLAG_GLOBAL_MCAST_ENABLED = BIT(3),
+ };
+ 
+ /**
+@@ -113,12 +169,16 @@ enum {
+  * @vid: VLAN id
+  * @flags: bridge vlan flags
+  * @priv_flags: private (in-kernel) bridge vlan flags
++ * @state: STP state (e.g. blocking, learning, forwarding)
+  * @stats: per-cpu VLAN statistics
+  * @br: if MASTER flag set, this points to a bridge struct
+  * @port: if MASTER flag unset, this points to a port struct
+  * @refcnt: if MASTER flag set, this is bumped for each port referencing it
+  * @brvlan: if MASTER flag unset, this points to the global per-VLAN context
+  *          for this VLAN entry
++ * @br_mcast_ctx: if MASTER flag set, this is the global vlan multicast context
++ * @port_mcast_ctx: if MASTER flag unset, this is the per-port/vlan multicast
++ *                  context
+  * @vlist: sorted list of VLAN entries
+  * @rcu: used for entry destruction
+  *
+@@ -133,7 +193,8 @@ struct net_bridge_vlan {
+ 	u16				vid;
+ 	u16				flags;
+ 	u16				priv_flags;
+-	struct br_vlan_stats __percpu	*stats;
++	u8				state;
++	struct pcpu_sw_netstats __percpu *stats;
+ 	union {
+ 		struct net_bridge	*br;
+ 		struct net_bridge_port	*port;
+@@ -145,6 +206,11 @@ struct net_bridge_vlan {
+ 
+ 	struct br_tunnel_info		tinfo;
+ 
++	union {
++		struct net_bridge_mcast		br_mcast_ctx;
++		struct net_bridge_mcast_port	port_mcast_ctx;
++	};
++
+ 	struct list_head		vlist;
+ 
+ 	struct rcu_head			rcu;
+@@ -170,6 +236,7 @@ struct net_bridge_vlan_group {
+ 	struct list_head		vlan_list;
+ 	u16				num_vlans;
+ 	u16				pvid;
++	u8				pvid_state;
+ };
+ 
+ struct net_bridge_fdb_key {
+@@ -497,6 +564,67 @@ static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
+ 	return true;
+ }
+ 
++static inline bool br_vlan_valid_id(u16 vid, struct netlink_ext_ack *extack)
++{
++	bool ret = vid > 0 && vid < VLAN_VID_MASK;
++
++	if (!ret)
++		NL_SET_ERR_MSG_MOD(extack, "Vlan id is invalid");
++
++	return ret;
++}
++
++static inline bool br_vlan_valid_range(const struct bridge_vlan_info *cur,
++				       const struct bridge_vlan_info *last,
++				       struct netlink_ext_ack *extack)
++{
++	/* pvid flag is not allowed in ranges */
++	if (cur->flags & BRIDGE_VLAN_INFO_PVID) {
++		NL_SET_ERR_MSG_MOD(extack, "Pvid isn't allowed in a range");
++		return false;
++	}
++
++	/* when cur is the range end, check if:
++	 *  - it has range start flag
++	 *  - range ids are invalid (end is equal to or before start)
++	 */
++	if (last) {
++		if (cur->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
++			NL_SET_ERR_MSG_MOD(extack, "Found a new vlan range start while processing one");
++			return false;
++		} else if (!(cur->flags & BRIDGE_VLAN_INFO_RANGE_END)) {
++			NL_SET_ERR_MSG_MOD(extack, "Vlan range end flag is missing");
++			return false;
++		} else if (cur->vid <= last->vid) {
++			NL_SET_ERR_MSG_MOD(extack, "End vlan id is less than or equal to start vlan id");
++			return false;
++		}
++	}
++
++	/* check for required range flags */
++	if (!(cur->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
++			    BRIDGE_VLAN_INFO_RANGE_END))) {
++		NL_SET_ERR_MSG_MOD(extack, "Both vlan range flags are missing");
++		return false;
++	}
++
++	return true;
++}
++
++static inline u8 br_vlan_multicast_router(const struct net_bridge_vlan *v)
++{
++	u8 mcast_router = MDB_RTR_TYPE_DISABLED;
++
++#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
++	if (!br_vlan_is_master(v))
++		mcast_router = v->port_mcast_ctx.multicast_router;
++	else
++		mcast_router = v->br_mcast_ctx.multicast_router;
++#endif
++
++	return mcast_router;
++}
++
+ static inline int br_opt_get(const struct net_bridge *br,
+ 			     enum net_bridge_opts opt)
+ {
+@@ -676,8 +804,10 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
+ 			struct sk_buff *skb, bool local_rcv, bool local_orig);
+ int br_multicast_set_router(struct net_bridge *br, unsigned long val);
+ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
++int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router);
+ int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+ int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
++
+ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
+ int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val);
+ #if IS_ENABLED(CONFIG_IPV6)
+@@ -708,6 +838,17 @@ void br_mdb_init(void);
+ void br_mdb_uninit(void);
+ void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify);
+ void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify);
++int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
++			       struct net_device *dev);
++int br_multicast_dump_querier_state(struct sk_buff *skb,
++				    const struct net_bridge_mcast *brmctx,
++				    int nest_attr);
++size_t br_multicast_querier_state_size(void);
++size_t br_rports_size(const struct net_bridge_mcast *brmctx);
++void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
++				  unsigned long val);
++void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
++					  unsigned long val);
+ 
+ #define mlock_dereference(X, br) \
+ 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
+@@ -760,6 +901,49 @@ static inline int br_multicast_igmp_type(const struct sk_buff *skb)
+ {
+ 	return BR_INPUT_SKB_CB(skb)->igmp;
+ }
++static inline bool
++br_rports_have_mc_router(const struct net_bridge_mcast *brmctx)
++{
++#if IS_ENABLED(CONFIG_IPV6)
++	return !hlist_empty(&brmctx->ip4_mc_router_list) ||
++	       !hlist_empty(&brmctx->ip6_mc_router_list);
++#else
++	return !hlist_empty(&brmctx->ip4_mc_router_list);
++#endif
++}
++
++static inline bool
++br_multicast_ctx_options_equal(const struct net_bridge_mcast *brmctx1,
++			       const struct net_bridge_mcast *brmctx2)
++{
++	return brmctx1->multicast_igmp_version ==
++	       brmctx2->multicast_igmp_version &&
++	       brmctx1->multicast_last_member_count ==
++	       brmctx2->multicast_last_member_count &&
++	       brmctx1->multicast_startup_query_count ==
++	       brmctx2->multicast_startup_query_count &&
++	       brmctx1->multicast_last_member_interval ==
++	       brmctx2->multicast_last_member_interval &&
++	       brmctx1->multicast_membership_interval ==
++	       brmctx2->multicast_membership_interval &&
++	       brmctx1->multicast_querier_interval ==
++	       brmctx2->multicast_querier_interval &&
++	       brmctx1->multicast_query_interval ==
++	       brmctx2->multicast_query_interval &&
++	       brmctx1->multicast_query_response_interval ==
++	       brmctx2->multicast_query_response_interval &&
++	       brmctx1->multicast_startup_query_interval ==
++	       brmctx2->multicast_startup_query_interval &&
++	       brmctx1->multicast_querier == brmctx2->multicast_querier &&
++	       brmctx1->multicast_router == brmctx2->multicast_router &&
++	       !br_rports_have_mc_router(brmctx1) &&
++	       !br_rports_have_mc_router(brmctx2) &&
++#if IS_ENABLED(CONFIG_IPV6)
++	       brmctx1->multicast_mld_version ==
++	       brmctx2->multicast_mld_version &&
++#endif
++	       true;
++}
+ #else
+ static inline int br_multicast_rcv(struct net_bridge *br,
+ 				   struct net_bridge_port *port,
+@@ -907,10 +1091,21 @@ void nbp_vlan_flush(struct net_bridge_port *port);
+ int nbp_vlan_init(struct net_bridge_port *port, struct netlink_ext_ack *extack);
+ int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
+ void br_vlan_get_stats(const struct net_bridge_vlan *v,
+-		       struct br_vlan_stats *stats);
++		       struct pcpu_sw_netstats *stats);
+ void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
+ int br_vlan_bridge_event(struct net_device *dev, unsigned long event,
+ 			 void *ptr);
++void br_vlan_rtnl_init(void);
++void br_vlan_rtnl_uninit(void);
++void br_vlan_notify(const struct net_bridge *br,
++		    const struct net_bridge_port *p,
++		    u16 vid, u16 vid_range,
++		    int cmd);
++int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
++		   const void *ctx, bool adding, struct notifier_block *nb,
++		   struct netlink_ext_ack *extack);
++bool br_vlan_can_enter_range(struct net_bridge_vlan *v_curr,
++			     struct net_bridge_vlan *range_end);
+ 
+ void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
+ 				    struct net_device_path_ctx *ctx,
+@@ -969,6 +1164,10 @@ static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
+ 	return vg->pvid;
+ }
+ 
++static inline u16 br_vlan_flags(const struct net_bridge_vlan *v, u16 pvid)
++{
++	return v->vid == pvid ? v->flags | BRIDGE_VLAN_INFO_PVID : v->flags;
++}
+ #else
+ static inline bool br_allowed_ingress(const struct net_bridge *br,
+ 				      struct net_bridge_vlan_group *vg,
+@@ -1111,7 +1310,7 @@ static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
+ }
+ 
+ static inline void br_vlan_get_stats(const struct net_bridge_vlan *v,
+-				     struct br_vlan_stats *stats)
++				     struct pcpu_sw_netstats *stats)
+ {
+ }
+ 
+@@ -1125,6 +1324,88 @@ static inline int br_vlan_bridge_event(struct net_device *dev,
+ {
+ 	return 0;
+ }
++
++static inline void br_vlan_rtnl_init(void)
++{
++}
++
++static inline void br_vlan_rtnl_uninit(void)
++{
++}
++
++static inline void br_vlan_notify(const struct net_bridge *br,
++				  const struct net_bridge_port *p,
++				  u16 vid, u16 vid_range,
++				  int cmd)
++{
++}
++
++static inline bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
++					   const struct net_bridge_vlan *range_end)
++{
++	return true;
++}
++
++static inline int br_vlan_replay(struct net_device *br_dev,
++				 struct net_device *dev, const void *ctx,
++				 bool adding, struct notifier_block *nb,
++				 struct netlink_ext_ack *extack)
++{
++	return -EOPNOTSUPP;
++}
++#endif
++
++/* br_vlan_options.c */
++#ifdef CONFIG_BRIDGE_VLAN_FILTERING
++bool br_vlan_opts_eq_range(struct net_bridge_vlan *v_curr,
++			   struct net_bridge_vlan *range_end);
++bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v);
++size_t br_vlan_opts_nl_size(void);
++int br_vlan_process_options(const struct net_bridge *br,
++			    struct net_bridge_port *p,
++			    struct net_bridge_vlan *range_start,
++			    struct net_bridge_vlan *range_end,
++			    struct nlattr **tb,
++			    struct netlink_ext_ack *extack);
++bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
++					 const struct net_bridge_vlan *r_end);
++bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
++			      const struct net_bridge_vlan *v_opts);
++
++/* vlan state manipulation helpers using *_ONCE to annotate lock-free access */
++static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v)
++{
++	return READ_ONCE(v->state);
++}
++
++static inline void br_vlan_set_state(struct net_bridge_vlan *v, u8 state)
++{
++	WRITE_ONCE(v->state, state);
++}
++
++static inline u8 br_vlan_get_pvid_state(const struct net_bridge_vlan_group *vg)
++{
++	return READ_ONCE(vg->pvid_state);
++}
++
++static inline void br_vlan_set_pvid_state(struct net_bridge_vlan_group *vg,
++					  u8 state)
++{
++	WRITE_ONCE(vg->pvid_state, state);
++}
++
++/* learn_allow is true at ingress and false at egress */
++static inline bool br_vlan_state_allowed(u8 state, bool learn_allow)
++{
++	switch (state) {
++	case BR_STATE_LEARNING:
++		return learn_allow;
++	case BR_STATE_FORWARDING:
++		return true;
++	default:
++		return false;
++	}
++}
+ #endif
+ 
+ struct nf_br_ops {
+@@ -1196,6 +1477,12 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags,
+ int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
+ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
+ 	       u32 filter_mask, int nlflags);
++int br_process_vlan_info(struct net_bridge *br,
++			 struct net_bridge_port *p, int cmd,
++			 struct bridge_vlan_info *vinfo_curr,
++			 struct bridge_vlan_info **vinfo_last,
++			 bool *changed,
++			 struct netlink_ext_ack *extack);
+ 
+ #ifdef CONFIG_SYSFS
+ /* br_sysfs_if.c */
+diff --git a/net/bridge/br_private_tunnel.h b/net/bridge/br_private_tunnel.h
+index 2bdef2e..25be963 100644
+--- a/net/bridge/br_private_tunnel.h
++++ b/net/bridge/br_private_tunnel.h
+@@ -42,6 +42,10 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+ 				  struct net_bridge_vlan_group *vg);
+ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+ 				 struct net_bridge_vlan *vlan);
++bool vlan_tunid_inrange(struct net_bridge_vlan *v_curr,
++			struct net_bridge_vlan *v_last);
++int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
++			u16 vid, u32 tun_id, bool *changed);
+ #else
+ static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
+ {
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index bcfd169..2b5950c 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -34,13 +34,15 @@ static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
+ 	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
+ }
+ 
+-static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
++static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
++			    const struct net_bridge_vlan *v)
+ {
+-	if (vg->pvid == vid)
++	if (vg->pvid == v->vid)
+ 		return false;
+ 
+ 	smp_wmb();
+-	vg->pvid = vid;
++	br_vlan_set_pvid_state(vg, v->state);
++	vg->pvid = v->vid;
+ 
+ 	return true;
+ }
+@@ -69,7 +71,7 @@ static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
+ 		vg = nbp_vlan_group(v->port);
+ 
+ 	if (flags & BRIDGE_VLAN_INFO_PVID)
+-		ret = __vlan_add_pvid(vg, v->vid);
++		ret = __vlan_add_pvid(vg, v);
+ 	else
+ 		ret = __vlan_delete_pvid(vg, v->vid);
+ 
+@@ -257,6 +259,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
+ 					  &changed, extack);
+ 			if (err)
+ 				goto out_filt;
++
++			if (changed)
++				br_vlan_notify(br, NULL, v->vid, 0,
++					       RTM_NEWVLAN);
+ 		}
+ 
+ 		masterv = br_vlan_get_master(br, v->vid, extack);
+@@ -266,7 +272,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
+ 		}
+ 		v->brvlan = masterv;
+ 		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
+-			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
++			v->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ 			if (!v->stats) {
+ 				err = -ENOMEM;
+ 				goto out_filt;
+@@ -382,13 +388,31 @@ static void __vlan_group_free(struct net_bridge_vlan_group *vg)
+ 	kfree(vg);
+ }
+ 
+-static void __vlan_flush(struct net_bridge_vlan_group *vg)
++static void __vlan_flush(const struct net_bridge *br,
++			 const struct net_bridge_port *p,
++			 struct net_bridge_vlan_group *vg)
+ {
+ 	struct net_bridge_vlan *vlan, *tmp;
++	u16 v_start = 0, v_end = 0;
+ 
+ 	__vlan_delete_pvid(vg, vg->pvid);
+-	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
++	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
++		/* take care of disjoint ranges */
++		if (!v_start) {
++			v_start = vlan->vid;
++		} else if (vlan->vid - v_end != 1) {
++			/* found range end, notify and start next one */
++			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
++			v_start = vlan->vid;
++		}
++		v_end = vlan->vid;
++
+ 		__vlan_del(vlan);
++	}
++
++	/* notify about the last/whole vlan range */
++	if (v_start)
++		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
+ }
+ 
+ struct sk_buff *br_handle_vlan(struct net_bridge *br,
+@@ -396,7 +420,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
+ 			       struct net_bridge_vlan_group *vg,
+ 			       struct sk_buff *skb)
+ {
+-	struct br_vlan_stats *stats;
++	struct pcpu_sw_netstats *stats;
+ 	struct net_bridge_vlan *v;
+ 	u16 vid;
+ 
+@@ -448,7 +472,7 @@ static bool __allowed_ingress(const struct net_bridge *br,
+ 			      struct net_bridge_vlan_group *vg,
+ 			      struct sk_buff *skb, u16 *vid)
+ {
+-	struct br_vlan_stats *stats;
++	struct pcpu_sw_netstats *stats;
+ 	struct net_bridge_vlan *v;
+ 	bool tagged;
+ 
+@@ -666,7 +690,7 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
+ 	if (!vlan)
+ 		return -ENOMEM;
+ 
+-	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
++	vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ 	if (!vlan->stats) {
+ 		kfree(vlan);
+ 		return -ENOMEM;
+@@ -718,7 +742,7 @@ void br_vlan_flush(struct net_bridge *br)
+ 	ASSERT_RTNL();
+ 
+ 	vg = br_vlan_group(br);
+-	__vlan_flush(vg);
++	__vlan_flush(br, NULL, vg);
+ 	RCU_INIT_POINTER(br->vlgrp, NULL);
+ 	synchronize_rcu();
+ 	__vlan_group_free(vg);
+@@ -927,12 +951,15 @@ static void br_vlan_disable_default_pvid(struct net_bridge *br)
+ 	/* Disable default_pvid on all ports where it is still
+ 	 * configured.
+ 	 */
+-	if (vlan_default_pvid(br_vlan_group(br), pvid))
+-		br_vlan_delete(br, pvid);
++	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
++		if (!br_vlan_delete(br, pvid))
++			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
++	}
+ 
+ 	list_for_each_entry(p, &br->port_list, list) {
+-		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
+-			nbp_vlan_delete(p, pvid);
++		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
++		    !nbp_vlan_delete(p, pvid))
++			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
+ 	}
+ 
+ 	br->default_pvid = 0;
+@@ -974,7 +1001,10 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
+ 				  &vlchange, extack);
+ 		if (err)
+ 			goto out;
+-		br_vlan_delete(br, old_pvid);
++
++		if (br_vlan_delete(br, old_pvid))
++			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
++		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
+ 		set_bit(0, changed);
+ 	}
+ 
+@@ -994,7 +1024,9 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
+ 				   &vlchange, extack);
+ 		if (err)
+ 			goto err_port;
+-		nbp_vlan_delete(p, old_pvid);
++		if (nbp_vlan_delete(p, old_pvid))
++			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
++		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
+ 		set_bit(p->port_no, changed);
+ 	}
+ 
+@@ -1009,22 +1041,28 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
+ 		if (!test_bit(p->port_no, changed))
+ 			continue;
+ 
+-		if (old_pvid)
++		if (old_pvid) {
+ 			nbp_vlan_add(p, old_pvid,
+ 				     BRIDGE_VLAN_INFO_PVID |
+ 				     BRIDGE_VLAN_INFO_UNTAGGED,
+ 				     &vlchange, NULL);
++			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
++		}
+ 		nbp_vlan_delete(p, pvid);
++		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
+ 	}
+ 
+ 	if (test_bit(0, changed)) {
+-		if (old_pvid)
++		if (old_pvid) {
+ 			br_vlan_add(br, old_pvid,
+ 				    BRIDGE_VLAN_INFO_PVID |
+ 				    BRIDGE_VLAN_INFO_UNTAGGED |
+ 				    BRIDGE_VLAN_INFO_BRENTRY,
+ 				    &vlchange, NULL);
++			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
++		}
+ 		br_vlan_delete(br, pvid);
++		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
+ 	}
+ 	goto out;
+ }
+@@ -1117,6 +1155,7 @@ int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
+ 				   &changed, extack);
+ 		if (ret)
+ 			goto err_vlan_add;
++		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
+ 	}
+ out:
+ 	return ret;
+@@ -1198,21 +1237,21 @@ void nbp_vlan_flush(struct net_bridge_port *port)
+ 	ASSERT_RTNL();
+ 
+ 	vg = nbp_vlan_group(port);
+-	__vlan_flush(vg);
++	__vlan_flush(port->br, port, vg);
+ 	RCU_INIT_POINTER(port->vlgrp, NULL);
+ 	synchronize_rcu();
+ 	__vlan_group_free(vg);
+ }
+ 
+ void br_vlan_get_stats(const struct net_bridge_vlan *v,
+-		       struct br_vlan_stats *stats)
++		       struct pcpu_sw_netstats *stats)
+ {
+ 	int i;
+ 
+ 	memset(stats, 0, sizeof(*stats));
+ 	for_each_possible_cpu(i) {
+ 		u64 rxpackets, rxbytes, txpackets, txbytes;
+-		struct br_vlan_stats *cpu_stats;
++		struct pcpu_sw_netstats *cpu_stats;
+ 		unsigned int start;
+ 
+ 		cpu_stats = per_cpu_ptr(v->stats, i);
+@@ -1526,8 +1565,8 @@ int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
+ {
+ 	struct netdev_notifier_changeupper_info *info;
+ 	struct net_bridge *br = netdev_priv(dev);
+-	bool changed;
+-	int ret = 0;
++	int vlcmd = 0, ret = 0;
++	bool changed = false;
+ 
+ 	switch (event) {
+ 	case NETDEV_REGISTER:
+@@ -1535,9 +1574,11 @@ int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
+ 				  BRIDGE_VLAN_INFO_PVID |
+ 				  BRIDGE_VLAN_INFO_UNTAGGED |
+ 				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
++		vlcmd = RTM_NEWVLAN;
+ 		break;
+ 	case NETDEV_UNREGISTER:
+-		br_vlan_delete(br, br->default_pvid);
++		changed = !br_vlan_delete(br, br->default_pvid);
++		vlcmd = RTM_DELVLAN;
+ 		break;
+ 	case NETDEV_CHANGEUPPER:
+ 		info = ptr;
+@@ -1551,6 +1592,8 @@ int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
+ 		br_vlan_link_state_change(dev, br);
+ 		break;
+ 	}
++	if (changed)
++		br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
+ 
+ 	return ret;
+ }
+@@ -1569,3 +1612,608 @@ void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
+ 		break;
+ 	}
+ }
++
++static bool br_vlan_stats_fill(struct sk_buff *skb,
++			       const struct net_bridge_vlan *v)
++{
++	struct pcpu_sw_netstats stats;
++	struct nlattr *nest;
++
++	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
++	if (!nest)
++		return false;
++
++	br_vlan_get_stats(v, &stats);
++	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
++			      BRIDGE_VLANDB_STATS_PAD) ||
++	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
++			      stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
++	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
++			      BRIDGE_VLANDB_STATS_PAD) ||
++	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
++			      stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
++		goto out_err;
++
++	nla_nest_end(skb, nest);
++
++	return true;
++
++out_err:
++	nla_nest_cancel(skb, nest);
++	return false;
++}
++
++/* v_opts is used to dump the options which must be equal in the whole range */
++static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
++			      const struct net_bridge_vlan *v_opts,
++			      u16 flags,
++			      bool dump_stats)
++{
++	struct bridge_vlan_info info;
++	struct nlattr *nest;
++
++	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
++	if (!nest)
++		return false;
++
++	memset(&info, 0, sizeof(info));
++	info.vid = vid;
++	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
++		info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
++	if (flags & BRIDGE_VLAN_INFO_PVID)
++		info.flags |= BRIDGE_VLAN_INFO_PVID;
++
++	if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
++		goto out_err;
++
++	if (vid_range && vid < vid_range &&
++	    !(flags & BRIDGE_VLAN_INFO_PVID) &&
++	    nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
++		goto out_err;
++
++	if (v_opts) {
++		if (!br_vlan_opts_fill(skb, v_opts))
++			goto out_err;
++
++		if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
++			goto out_err;
++	}
++
++	nla_nest_end(skb, nest);
++
++	return true;
++
++out_err:
++	nla_nest_cancel(skb, nest);
++	return false;
++}
++
++static size_t rtnl_vlan_nlmsg_size(void)
++{
++	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
++		+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
++		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
++		+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
++		+ br_vlan_opts_nl_size(); /* bridge vlan options */
++}
++
++void br_vlan_notify(const struct net_bridge *br,
++		    const struct net_bridge_port *p,
++		    u16 vid, u16 vid_range,
++		    int cmd)
++{
++	struct net_bridge_vlan_group *vg;
++	struct net_bridge_vlan *v = NULL;
++	struct br_vlan_msg *bvm;
++	struct nlmsghdr *nlh;
++	struct sk_buff *skb;
++	int err = -ENOBUFS;
++	struct net *net;
++	u16 flags = 0;
++	int ifindex;
++
++	/* right now notifications are done only with rtnl held */
++	ASSERT_RTNL();
++
++	if (p) {
++		ifindex = p->dev->ifindex;
++		vg = nbp_vlan_group(p);
++		net = dev_net(p->dev);
++	} else {
++		ifindex = br->dev->ifindex;
++		vg = br_vlan_group(br);
++		net = dev_net(br->dev);
++	}
++
++	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
++	if (!skb)
++		goto out_err;
++
++	err = -EMSGSIZE;
++	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
++	if (!nlh)
++		goto out_err;
++	bvm = nlmsg_data(nlh);
++	memset(bvm, 0, sizeof(*bvm));
++	bvm->family = AF_BRIDGE;
++	bvm->ifindex = ifindex;
++
++	switch (cmd) {
++	case RTM_NEWVLAN:
++		/* need to find the vlan due to flags/options */
++		v = br_vlan_find(vg, vid);
++		if (!v || !br_vlan_should_use(v))
++			goto out_kfree;
++
++		flags = v->flags;
++		if (br_get_pvid(vg) == v->vid)
++			flags |= BRIDGE_VLAN_INFO_PVID;
++		break;
++	case RTM_DELVLAN:
++		break;
++	default:
++		goto out_kfree;
++	}
++
++	if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
++		goto out_err;
++
++	nlmsg_end(skb, nlh);
++	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
++	return;
++
++out_err:
++	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
++out_kfree:
++	kfree_skb(skb);
++}
++
++static int br_vlan_replay_one(struct notifier_block *nb,
++			      struct net_device *dev,
++			      struct switchdev_obj_port_vlan *vlan,
++			      const void *ctx, unsigned long action,
++			      struct netlink_ext_ack *extack)
++{
++	struct switchdev_notifier_port_obj_info obj_info = {
++		.info = {
++			.dev = dev,
++			.extack = extack,
++			.ctx = ctx,
++		},
++		.obj = &vlan->obj,
++	};
++	int err;
++
++	err = nb->notifier_call(nb, action, &obj_info);
++	return notifier_to_errno(err);
++}
++
++int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
++		   const void *ctx, bool adding, struct notifier_block *nb,
++		   struct netlink_ext_ack *extack)
++{
++	struct net_bridge_vlan_group *vg;
++	struct net_bridge_vlan *v;
++	struct net_bridge_port *p;
++	struct net_bridge *br;
++	unsigned long action;
++	int err = 0;
++	u16 pvid;
++
++	ASSERT_RTNL();
++
++	if (!nb)
++		return 0;
++
++	if (!netif_is_bridge_master(br_dev))
++		return -EINVAL;
++
++	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
++		return -EINVAL;
++
++	if (netif_is_bridge_master(dev)) {
++		br = netdev_priv(dev);
++		vg = br_vlan_group(br);
++		p = NULL;
++	} else {
++		p = br_port_get_rtnl(dev);
++		if (WARN_ON(!p))
++			return -EINVAL;
++		vg = nbp_vlan_group(p);
++		br = p->br;
++	}
++
++	if (!vg)
++		return 0;
++
++	if (adding)
++		action = SWITCHDEV_PORT_OBJ_ADD;
++	else
++		action = SWITCHDEV_PORT_OBJ_DEL;
++
++	pvid = br_get_pvid(vg);
++
++	list_for_each_entry(v, &vg->vlan_list, vlist) {
++		struct switchdev_obj_port_vlan vlan = {
++			.obj.orig_dev = dev,
++			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
++			.flags = br_vlan_flags(v, pvid),
++			.vid = v->vid,
++		};
++
++		if (!br_vlan_should_use(v))
++			continue;
++
++		err = br_vlan_replay_one(nb, dev, &vlan, ctx, action, extack);
++		if (err)
++			return err;
++	}
++
++	return err;
++}
++
++/* check if v_curr can enter a range ending in range_end */
++bool br_vlan_can_enter_range(struct net_bridge_vlan *v_curr,
++			     struct net_bridge_vlan *range_end)
++{
++	return v_curr->vid - range_end->vid == 1 &&
++	       range_end->flags == v_curr->flags &&
++	       br_vlan_opts_eq_range(v_curr, range_end);
++}
++
++static int br_vlan_dump_dev(const struct net_device *dev,
++			    struct sk_buff *skb,
++			    struct netlink_callback *cb,
++			    u32 dump_flags)
++{
++	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
++	bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
++	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
++	struct net_bridge_vlan_group *vg;
++	int idx = 0, s_idx = cb->args[1];
++	struct nlmsghdr *nlh = NULL;
++	struct net_bridge_port *p;
++	struct br_vlan_msg *bvm;
++	struct net_bridge *br;
++	int err = 0;
++	u16 pvid;
++
++	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
++		return -EINVAL;
++
++	if (netif_is_bridge_master(dev)) {
++		br = netdev_priv(dev);
++		vg = br_vlan_group_rcu(br);
++		p = NULL;
++	} else {
++		/* global options are dumped only for bridge devices */
++		if (dump_global)
++			return 0;
++
++		p = br_port_get_rcu(dev);
++		if (WARN_ON(!p))
++			return -EINVAL;
++		vg = nbp_vlan_group_rcu(p);
++		br = p->br;
++	}
++
++	if (!vg)
++		return 0;
++
++	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
++			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
++	if (!nlh)
++		return -EMSGSIZE;
++	bvm = nlmsg_data(nlh);
++	memset(bvm, 0, sizeof(*bvm));
++	bvm->family = PF_BRIDGE;
++	bvm->ifindex = dev->ifindex;
++	pvid = br_get_pvid(vg);
++
++	/* idx must stay at range's beginning until it is filled in */
++	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
++		if (!dump_global && !br_vlan_should_use(v))
++			continue;
++		if (idx < s_idx) {
++			idx++;
++			continue;
++		}
++
++		if (!range_start) {
++			range_start = v;
++			range_end = v;
++			continue;
++		}
++
++		if (dump_global) {
++			if (br_vlan_global_opts_can_enter_range(v, range_end))
++				goto update_end;
++			if (!br_vlan_global_opts_fill(skb, range_start->vid,
++						      range_end->vid,
++						      range_start)) {
++				err = -EMSGSIZE;
++				break;
++			}
++			/* advance number of filled vlans */
++			idx += range_end->vid - range_start->vid + 1;
++
++			range_start = v;
++		} else if (dump_stats || v->vid == pvid ||
++			   !br_vlan_can_enter_range(v, range_end)) {
++			u16 vlan_flags = br_vlan_flags(range_start, pvid);
++
++			if (!br_vlan_fill_vids(skb, range_start->vid,
++					       range_end->vid, range_start,
++					       vlan_flags, dump_stats)) {
++				err = -EMSGSIZE;
++				break;
++			}
++			/* advance number of filled vlans */
++			idx += range_end->vid - range_start->vid + 1;
++
++			range_start = v;
++		}
++update_end:
++		range_end = v;
++	}
++
++	/* err will be 0 and range_start will be set in 3 cases here:
++	 * - first vlan (range_start == range_end)
++	 * - last vlan (range_start == range_end, not in range)
++	 * - last vlan range (range_start != range_end, in range)
++	 */
++	if (!err && range_start) {
++		if (dump_global &&
++		    !br_vlan_global_opts_fill(skb, range_start->vid,
++					      range_end->vid, range_start))
++			err = -EMSGSIZE;
++		else if (!dump_global &&
++			 !br_vlan_fill_vids(skb, range_start->vid,
++					    range_end->vid, range_start,
++					    br_vlan_flags(range_start, pvid),
++					    dump_stats))
++			err = -EMSGSIZE;
++	}
++
++	cb->args[1] = err ? idx : 0;
++
++	nlmsg_end(skb, nlh);
++
++	return err;
++}
++
++static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
++	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
++};
++
++static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
++{
++	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
++	int idx = 0, err = 0, s_idx = cb->args[0];
++	struct net *net = sock_net(skb->sk);
++	struct br_vlan_msg *bvm;
++	struct net_device *dev;
++	u32 dump_flags = 0;
++
++	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
++			  br_vlan_db_dump_pol, cb->extack);
++	if (err < 0)
++		return err;
++
++	bvm = nlmsg_data(cb->nlh);
++	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
++		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
++
++	rcu_read_lock();
++	if (bvm->ifindex) {
++		dev = dev_get_by_index_rcu(net, bvm->ifindex);
++		if (!dev) {
++			err = -ENODEV;
++			goto out_err;
++		}
++		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
++		/* if the dump completed without an error we return 0 here */
++		if (err != -EMSGSIZE)
++			goto out_err;
++	} else {
++		for_each_netdev_rcu(net, dev) {
++			if (idx < s_idx)
++				goto skip;
++
++			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
++			if (err == -EMSGSIZE)
++				break;
++skip:
++			idx++;
++		}
++	}
++	cb->args[0] = idx;
++	rcu_read_unlock();
++
++	return skb->len;
++
++out_err:
++	rcu_read_unlock();
++
++	return err;
++}
++
++static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
++	[BRIDGE_VLANDB_ENTRY_INFO]	=
++		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
++	[BRIDGE_VLANDB_ENTRY_RANGE]	= { .type = NLA_U16 },
++	[BRIDGE_VLANDB_ENTRY_STATE]	= { .type = NLA_U8 },
++	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
++	[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]	= { .type = NLA_U8 },
++};
++
++static int br_vlan_rtm_process_one(struct net_device *dev,
++				   const struct nlattr *attr,
++				   int cmd, struct netlink_ext_ack *extack)
++{
++	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
++	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
++	bool changed = false, skip_processing = false;
++	struct net_bridge_vlan_group *vg;
++	struct net_bridge_port *p = NULL;
++	int err = 0, cmdmap = 0;
++	struct net_bridge *br;
++
++	if (netif_is_bridge_master(dev)) {
++		br = netdev_priv(dev);
++		vg = br_vlan_group(br);
++	} else {
++		p = br_port_get_rtnl(dev);
++		if (WARN_ON(!p))
++			return -ENODEV;
++		br = p->br;
++		vg = nbp_vlan_group(p);
++	}
++
++	if (WARN_ON(!vg))
++		return -ENODEV;
++
++	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
++			       br_vlan_db_policy, extack);
++	if (err)
++		return err;
++
++	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
++		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
++		return -EINVAL;
++	}
++	memset(&vrange_end, 0, sizeof(vrange_end));
++
++	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
++	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
++			    BRIDGE_VLAN_INFO_RANGE_END)) {
++		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
++		return -EINVAL;
++	}
++	if (!br_vlan_valid_id(vinfo->vid, extack))
++		return -EINVAL;
++
++	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
++		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
++		/* validate user-provided flags without RANGE_BEGIN */
++		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
++		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
++
++		/* vinfo_last is the range start, vinfo the range end */
++		vinfo_last = vinfo;
++		vinfo = &vrange_end;
++
++		if (!br_vlan_valid_id(vinfo->vid, extack) ||
++		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
++			return -EINVAL;
++	}
++
++	switch (cmd) {
++	case RTM_NEWVLAN:
++		cmdmap = RTM_SETLINK;
++		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
++		break;
++	case RTM_DELVLAN:
++		cmdmap = RTM_DELLINK;
++		break;
++	}
++
++	if (!skip_processing) {
++		struct bridge_vlan_info *tmp_last = vinfo_last;
++
++		/* br_process_vlan_info may overwrite vinfo_last */
++		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
++					   &changed, extack);
++
++		/* notify first if anything changed */
++		if (changed)
++			br_ifinfo_notify(cmdmap, br, p);
++
++		if (err)
++			return err;
++	}
++
++	/* deal with options */
++	if (cmd == RTM_NEWVLAN) {
++		struct net_bridge_vlan *range_start, *range_end;
++
++		if (vinfo_last) {
++			range_start = br_vlan_find(vg, vinfo_last->vid);
++			range_end = br_vlan_find(vg, vinfo->vid);
++		} else {
++			range_start = br_vlan_find(vg, vinfo->vid);
++			range_end = range_start;
++		}
++
++		err = br_vlan_process_options(br, p, range_start, range_end,
++					      tb, extack);
++	}
++
++	return err;
++}
++
++static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
++			       struct netlink_ext_ack *extack)
++{
++	struct net *net = sock_net(skb->sk);
++	struct br_vlan_msg *bvm;
++	struct net_device *dev;
++	struct nlattr *attr;
++	int err, vlans = 0;
++	int rem;
++
++	/* this should validate the header and check for remaining bytes */
++	err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
++			  extack);
++	if (err < 0)
++		return err;
++
++	bvm = nlmsg_data(nlh);
++	dev = __dev_get_by_index(net, bvm->ifindex);
++	if (!dev)
++		return -ENODEV;
++
++	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
++		NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
++		return -EINVAL;
++	}
++
++	nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
++		switch (nla_type(attr)) {
++		case BRIDGE_VLANDB_ENTRY:
++			err = br_vlan_rtm_process_one(dev, attr,
++						      nlh->nlmsg_type,
++						      extack);
++			break;
++		default:
++			continue;
++		}
++
++		vlans++;
++		if (err)
++			break;
++	}
++	if (!vlans) {
++		NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
++		err = -EINVAL;
++	}
++
++	return err;
++}
++
++void br_vlan_rtnl_init(void)
++{
++	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
++			     br_vlan_rtm_dump, 0);
++	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
++			     br_vlan_rtm_process, NULL, 0);
++	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
++			     br_vlan_rtm_process, NULL, 0);
++}
++
++void br_vlan_rtnl_uninit(void)
++{
++	rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
++	rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
++	rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
++}
+diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
+new file mode 100644
+index 0000000..5e48c29
+--- /dev/null
++++ b/net/bridge/br_vlan_options.c
+@@ -0,0 +1,346 @@
++// SPDX-License-Identifier: GPL-2.0-only
++// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
++#include <linux/kernel.h>
++#include <linux/netdevice.h>
++#include <linux/rtnetlink.h>
++#include <linux/slab.h>
++#include <net/ip_tunnels.h>
++
++#include "br_private.h"
++#include "br_private_tunnel.h"
++
++static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v)
++{
++	__be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id);
++	struct nlattr *nest;
++
++	if (!v->tinfo.tunnel_dst)
++		return true;
++
++	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO);
++	if (!nest)
++		return false;
++	if (nla_put_u32(skb, BRIDGE_VLANDB_TINFO_ID, be32_to_cpu(tid))) {
++		nla_nest_cancel(skb, nest);
++		return false;
++	}
++	nla_nest_end(skb, nest);
++
++	return true;
++}
++
++static bool __vlan_tun_can_enter_range(struct net_bridge_vlan *v_curr,
++				       struct net_bridge_vlan *range_end)
++{
++	return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) ||
++	       vlan_tunid_inrange(v_curr, range_end);
++}
++
++/* check if the options' state of v_curr allow it to enter the range */
++bool br_vlan_opts_eq_range(struct net_bridge_vlan *v_curr,
++			   struct net_bridge_vlan *range_end)
++{
++	u8 range_mc_rtr = br_vlan_multicast_router(range_end);
++	u8 curr_mc_rtr = br_vlan_multicast_router(v_curr);
++
++	return v_curr->state == range_end->state &&
++	       __vlan_tun_can_enter_range(v_curr, range_end) &&
++	       curr_mc_rtr == range_mc_rtr;
++}
++
++bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
++{
++	if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state(v)) ||
++	    !__vlan_tun_put(skb, v))
++		return false;
++
++#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
++	if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
++		       br_vlan_multicast_router(v)))
++		return false;
++#endif
++
++	return true;
++}
++
++size_t br_vlan_opts_nl_size(void)
++{
++	return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */
++	       + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY_TUNNEL_INFO */
++	       + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_TINFO_ID */
++#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
++	       + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_MCAST_ROUTER */
++#endif
++	       + 0;
++}
++
++static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
++				struct net_bridge_vlan *v,
++				u8 state,
++				bool *changed,
++				struct netlink_ext_ack *extack)
++{
++	struct net_bridge *br;
++
++	ASSERT_RTNL();
++
++	if (state > BR_STATE_BLOCKING) {
++		NL_SET_ERR_MSG_MOD(extack, "Invalid vlan state");
++		return -EINVAL;
++	}
++
++	if (br_vlan_is_brentry(v))
++		br = v->br;
++	else
++		br = v->port->br;
++
++	if (br->stp_enabled == BR_KERNEL_STP) {
++		NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state when using kernel STP");
++		return -EBUSY;
++	}
++
++	if (v->state == state)
++		return 0;
++
++	if (v->vid == br_get_pvid(vg))
++		br_vlan_set_pvid_state(vg, state);
++
++	br_vlan_set_state(v, state);
++	*changed = true;
++
++	return 0;
++}
++
++static const struct nla_policy br_vlandb_tinfo_pol[BRIDGE_VLANDB_TINFO_MAX + 1] = {
++	[BRIDGE_VLANDB_TINFO_ID]	= { .type = NLA_U32 },
++	[BRIDGE_VLANDB_TINFO_CMD]	= { .type = NLA_U32 },
++};
++
++static int br_vlan_modify_tunnel(struct net_bridge_port *p,
++				 struct net_bridge_vlan *v,
++				 struct nlattr **tb,
++				 bool *changed,
++				 struct netlink_ext_ack *extack)
++{
++	struct nlattr *tun_tb[BRIDGE_VLANDB_TINFO_MAX + 1], *attr;
++	struct bridge_vlan_info *vinfo;
++	u32 tun_id = 0;
++	int cmd, err;
++
++	if (!p) {
++		NL_SET_ERR_MSG_MOD(extack, "Can't modify tunnel mapping of non-port vlans");
++		return -EINVAL;
++	}
++	if (!(p->flags & BR_VLAN_TUNNEL)) {
++		NL_SET_ERR_MSG_MOD(extack, "Port doesn't have tunnel flag set");
++		return -EINVAL;
++	}
++
++	attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO];
++	err = nla_parse_nested(tun_tb, BRIDGE_VLANDB_TINFO_MAX, attr,
++			       br_vlandb_tinfo_pol, extack);
++	if (err)
++		return err;
++
++	if (!tun_tb[BRIDGE_VLANDB_TINFO_CMD]) {
++		NL_SET_ERR_MSG_MOD(extack, "Missing tunnel command attribute");
++		return -ENOENT;
++	}
++	cmd = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_CMD]);
++	switch (cmd) {
++	case RTM_SETLINK:
++		if (!tun_tb[BRIDGE_VLANDB_TINFO_ID]) {
++			NL_SET_ERR_MSG_MOD(extack, "Missing tunnel id attribute");
++			return -ENOENT;
++		}
++		/* when working on vlan ranges this is the starting tunnel id */
++		tun_id = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_ID]);
++		/* vlan info attr is guaranteed by br_vlan_rtm_process_one */
++		vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
++		/* tunnel ids are mapped to each vlan in increasing order,
++		 * the starting vlan is in BRIDGE_VLANDB_ENTRY_INFO and v is the
++		 * current vlan, so we compute: tun_id + v - vinfo->vid
++		 */
++		tun_id += v->vid - vinfo->vid;
++		break;
++	case RTM_DELLINK:
++		break;
++	default:
++		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel command");
++		return -EINVAL;
++	}
++
++	return br_vlan_tunnel_info(p, cmd, v->vid, tun_id, changed);
++}
++
++static int br_vlan_process_one_opts(const struct net_bridge *br,
++				    struct net_bridge_port *p,
++				    struct net_bridge_vlan_group *vg,
++				    struct net_bridge_vlan *v,
++				    struct nlattr **tb,
++				    bool *changed,
++				    struct netlink_ext_ack *extack)
++{
++	int err;
++
++	*changed = false;
++	if (tb[BRIDGE_VLANDB_ENTRY_STATE]) {
++		u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]);
++
++		err = br_vlan_modify_state(vg, v, state, changed, extack);
++		if (err)
++			return err;
++	}
++	if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) {
++		err = br_vlan_modify_tunnel(p, v, tb, changed, extack);
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
++
++int br_vlan_process_options(const struct net_bridge *br,
++			    struct net_bridge_port *p,
++			    struct net_bridge_vlan *range_start,
++			    struct net_bridge_vlan *range_end,
++			    struct nlattr **tb,
++			    struct netlink_ext_ack *extack)
++{
++	struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
++	struct net_bridge_vlan_group *vg;
++	int vid, err = 0;
++	u16 pvid;
++
++	if (p)
++		vg = nbp_vlan_group(p);
++	else
++		vg = br_vlan_group(br);
++
++	if (!range_start || !br_vlan_should_use(range_start)) {
++		NL_SET_ERR_MSG_MOD(extack, "Vlan range start doesn't exist, can't process options");
++		return -ENOENT;
++	}
++	if (!range_end || !br_vlan_should_use(range_end)) {
++		NL_SET_ERR_MSG_MOD(extack, "Vlan range end doesn't exist, can't process options");
++		return -ENOENT;
++	}
++
++	pvid = br_get_pvid(vg);
++	for (vid = range_start->vid; vid <= range_end->vid; vid++) {
++		bool changed = false;
++
++		v = br_vlan_find(vg, vid);
++		if (!v || !br_vlan_should_use(v)) {
++			NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process options");
++			err = -ENOENT;
++			break;
++		}
++
++		err = br_vlan_process_one_opts(br, p, vg, v, tb, &changed,
++					       extack);
++		if (err)
++			break;
++
++		if (changed) {
++			/* vlan options changed, check for range */
++			if (!curr_start) {
++				curr_start = v;
++				curr_end = v;
++				continue;
++			}
++
++			if (v->vid == pvid ||
++			    !br_vlan_can_enter_range(v, curr_end)) {
++				br_vlan_notify(br, p, curr_start->vid,
++					       curr_end->vid, RTM_NEWVLAN);
++				curr_start = v;
++			}
++			curr_end = v;
++		} else {
++			/* nothing changed and nothing to notify yet */
++			if (!curr_start)
++				continue;
++
++			br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
++				       RTM_NEWVLAN);
++			curr_start = NULL;
++			curr_end = NULL;
++		}
++	}
++	if (curr_start)
++		br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
++			       RTM_NEWVLAN);
++
++	return err;
++}
++
++bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
++					 const struct net_bridge_vlan *r_end)
++{
++	return v_curr->vid - r_end->vid == 1 &&
++	       ((v_curr->priv_flags ^ r_end->priv_flags) &
++		BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 &&
++		br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx,
++					       &r_end->br_mcast_ctx);
++}
++
++bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
++			      const struct net_bridge_vlan *v_opts)
++{
++	struct nlattr *nest2 __maybe_unused;
++	u64 clockval __maybe_unused;
++	struct nlattr *nest;
++
++	nest = nla_nest_start(skb, BRIDGE_VLANDB_GLOBAL_OPTIONS);
++	if (!nest)
++		return false;
++
++	if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_ID, vid))
++		goto out_err;
++
++	if (vid_range && vid < vid_range &&
++	    nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_RANGE, vid_range))
++		goto out_err;
++
++#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
++	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_last_member_interval);
++	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
++			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
++		goto out_err;
++	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_membership_interval);
++	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
++			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
++		goto out_err;
++	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_querier_interval);
++	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
++			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
++		goto out_err;
++	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_interval);
++	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
++			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
++		goto out_err;
++	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_response_interval);
++	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
++			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
++		goto out_err;
++	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_startup_query_interval);
++	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
++			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
++		goto out_err;
++
++#if IS_ENABLED(CONFIG_IPV6)
++	if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
++		       v_opts->br_mcast_ctx.multicast_mld_version))
++		goto out_err;
++#endif
++#endif
++
++	nla_nest_end(skb, nest);
++
++	return true;
++
++out_err:
++	nla_nest_cancel(skb, nest);
++	return false;
++}
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index dbc9b2f..706b207 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1996,6 +1996,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+ 				goto cont;
+ 			if (idx < s_idx)
+ 				goto cont;
++
+ 			err = rtnl_fill_ifinfo(skb, dev, net,
+ 					       RTM_NEWLINK,
+ 					       NETLINK_CB(cb->skb).portid,
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 2dfaa1e..a60a26c 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1495,8 +1495,19 @@ int dsa_slave_create(struct dsa_port *port)
+ 		goto out_phy;
+ 	}
+ 
++	rtnl_lock();
++
++	ret = netdev_upper_dev_link(master, slave_dev, NULL);
++
++	rtnl_unlock();
++
++	if (ret)
++		goto out_unregister;
++
+ 	return 0;
+ 
++out_unregister:
++	unregister_netdev(slave_dev);
+ out_phy:
+ 	rtnl_lock();
+ 	phylink_disconnect_phy(p->dp->pl);