Author: developer
Date:   2022-07-26 17:14:47 +0800

diff --git a/include/net/switchdev.h b/include/net/switchdev.h
2index 191dc34..d4d71d9 100644
3--- a/include/net/switchdev.h
4+++ b/include/net/switchdev.h
5@@ -77,6 +77,7 @@ struct switchdev_obj {
6 struct switchdev_obj_port_vlan {
7 struct switchdev_obj obj;
8 u16 flags;
9+ u16 vid;
10 u16 vid_begin;
11 u16 vid_end;
12 };
13@@ -117,6 +118,7 @@ enum switchdev_notifier_type {
14 struct switchdev_notifier_info {
15 struct net_device *dev;
16 struct netlink_ext_ack *extack;
17+ const void *ctx;
18 };
19
20 struct switchdev_notifier_fdb_info {
21diff --git a/net/bridge/Makefile b/net/bridge/Makefile
22index ac9ef33..49da7ae 100644
23--- a/net/bridge/Makefile
24+++ b/net/bridge/Makefile
25@@ -20,7 +20,7 @@ obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
26
27 bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
28
29-bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o
30+bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o br_vlan_options.o
31
32 bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o
33
34diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
35index da5ed4c..eeabfbc 100644
36--- a/net/bridge/br_mdb.c
37+++ b/net/bridge/br_mdb.c
38@@ -16,7 +16,37 @@
39
40 #include "br_private.h"
41
42-static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
43+static size_t __br_rports_one_size(void)
44+{
45+ return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
46+ nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
47+ nla_total_size(sizeof(u8)) + /* MDBA_ROUTER_PATTR_TYPE */
48+ nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
49+ nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
50+ nla_total_size(sizeof(u32)); /* MDBA_ROUTER_PATTR_VID */
51+}
52+
53+size_t br_rports_size(const struct net_bridge_mcast *brmctx)
54+{
55+ struct net_bridge_mcast_port *pmctx;
56+ size_t size = nla_total_size(0); /* MDBA_ROUTER */
57+
58+ rcu_read_lock();
59+ hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
60+ ip4_rlist)
61+ size += __br_rports_one_size();
62+
63+#if IS_ENABLED(CONFIG_IPV6)
64+ hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
65+ ip6_rlist)
66+ size += __br_rports_one_size();
67+#endif
68+ rcu_read_unlock();
69+
70+ return size;
71+}
72+
73+int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
74 struct net_device *dev)
75 {
76 struct net_bridge *br = netdev_priv(dev);
77diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
78index cbcbc19..887e767 100644
79--- a/net/bridge/br_netlink.c
80+++ b/net/bridge/br_netlink.c
81@@ -562,7 +562,7 @@ static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
82 return err;
83 }
84
85-static int br_process_vlan_info(struct net_bridge *br,
86+int br_process_vlan_info(struct net_bridge *br,
87 struct net_bridge_port *p, int cmd,
88 struct bridge_vlan_info *vinfo_curr,
89 struct bridge_vlan_info **vinfo_last,
90@@ -1578,7 +1578,7 @@ static int br_fill_linkxstats(struct sk_buff *skb,
91 pvid = br_get_pvid(vg);
92 list_for_each_entry(v, &vg->vlan_list, vlist) {
93 struct bridge_vlan_xstats vxi;
94- struct br_vlan_stats stats;
95+ struct pcpu_sw_netstats stats;
96
97 if (++vl_idx < *prividx)
98 continue;
99@@ -1652,6 +1652,7 @@ int __init br_netlink_init(void)
100 int err;
101
102 br_mdb_init();
103+ br_vlan_rtnl_init();
104 rtnl_af_register(&br_af_ops);
105
106 err = rtnl_link_register(&br_link_ops);
107@@ -1669,6 +1670,7 @@ int __init br_netlink_init(void)
108 void br_netlink_fini(void)
109 {
110 br_mdb_uninit();
111+ br_vlan_rtnl_uninit();
112 rtnl_af_unregister(&br_af_ops);
113 rtnl_link_unregister(&br_link_ops);
114 }
115diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c
116index afee292..3bbbd66 100644
117--- a/net/bridge/br_netlink_tunnel.c
118+++ b/net/bridge/br_netlink_tunnel.c
119@@ -26,7 +26,7 @@ static size_t __get_vlan_tinfo_size(void)
120 nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_VLAN_TUNNEL_FLAGS */
121 }
122
123-static bool vlan_tunid_inrange(struct net_bridge_vlan *v_curr,
124+bool vlan_tunid_inrange(struct net_bridge_vlan *v_curr,
125 struct net_bridge_vlan *v_last)
126 {
127 __be32 tunid_curr = tunnel_id_to_key32(v_curr->tinfo.tunnel_id);
128@@ -193,7 +193,7 @@ static const struct nla_policy vlan_tunnel_policy[IFLA_BRIDGE_VLAN_TUNNEL_MAX +
129 [IFLA_BRIDGE_VLAN_TUNNEL_FLAGS] = { .type = NLA_U16 },
130 };
131
132-static int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
133+int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
134 u16 vid, u32 tun_id, bool *changed)
135 {
136 int err = 0;
137diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
138index 4bd9e9b..4620f70 100644
139--- a/net/bridge/br_private.h
140+++ b/net/bridge/br_private.h
141@@ -95,6 +95,60 @@ struct br_vlan_stats {
142 struct u64_stats_sync syncp;
143 };
144
145+/* net_bridge_mcast_port must be always defined due to forwarding stubs */
146+struct net_bridge_mcast_port {
147+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
148+ struct net_bridge_port *port;
149+ struct net_bridge_vlan *vlan;
150+
151+ struct bridge_mcast_own_query ip4_own_query;
152+ struct timer_list ip4_mc_router_timer;
153+ struct hlist_node ip4_rlist;
154+#if IS_ENABLED(CONFIG_IPV6)
155+ struct bridge_mcast_own_query ip6_own_query;
156+ struct timer_list ip6_mc_router_timer;
157+ struct hlist_node ip6_rlist;
158+#endif /* IS_ENABLED(CONFIG_IPV6) */
159+ unsigned char multicast_router;
160+#endif /* CONFIG_BRIDGE_IGMP_SNOOPING */
161+};
162+
163+/* net_bridge_mcast must be always defined due to forwarding stubs */
164+struct net_bridge_mcast {
165+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
166+ struct net_bridge *br;
167+ struct net_bridge_vlan *vlan;
168+
169+ u32 multicast_last_member_count;
170+ u32 multicast_startup_query_count;
171+
172+ u8 multicast_querier;
173+ u8 multicast_igmp_version;
174+ u8 multicast_router;
175+#if IS_ENABLED(CONFIG_IPV6)
176+ u8 multicast_mld_version;
177+#endif
178+ unsigned long multicast_last_member_interval;
179+ unsigned long multicast_membership_interval;
180+ unsigned long multicast_querier_interval;
181+ unsigned long multicast_query_interval;
182+ unsigned long multicast_query_response_interval;
183+ unsigned long multicast_startup_query_interval;
184+ struct hlist_head ip4_mc_router_list;
185+ struct timer_list ip4_mc_router_timer;
186+ struct bridge_mcast_other_query ip4_other_query;
187+ struct bridge_mcast_own_query ip4_own_query;
188+ struct bridge_mcast_querier ip4_querier;
189+#if IS_ENABLED(CONFIG_IPV6)
190+ struct hlist_head ip6_mc_router_list;
191+ struct timer_list ip6_mc_router_timer;
192+ struct bridge_mcast_other_query ip6_other_query;
193+ struct bridge_mcast_own_query ip6_own_query;
194+ struct bridge_mcast_querier ip6_querier;
195+#endif /* IS_ENABLED(CONFIG_IPV6) */
196+#endif /* CONFIG_BRIDGE_IGMP_SNOOPING */
197+};
198+
199 struct br_tunnel_info {
200 __be64 tunnel_id;
201 struct metadata_dst __rcu *tunnel_dst;
202@@ -104,6 +158,8 @@ struct br_tunnel_info {
203 enum {
204 BR_VLFLAG_PER_PORT_STATS = BIT(0),
205 BR_VLFLAG_ADDED_BY_SWITCHDEV = BIT(1),
206+ BR_VLFLAG_MCAST_ENABLED = BIT(2),
207+ BR_VLFLAG_GLOBAL_MCAST_ENABLED = BIT(3),
208 };
209
210 /**
211@@ -113,12 +169,16 @@ enum {
212 * @vid: VLAN id
213 * @flags: bridge vlan flags
214 * @priv_flags: private (in-kernel) bridge vlan flags
215+ * @state: STP state (e.g. blocking, learning, forwarding)
216 * @stats: per-cpu VLAN statistics
217 * @br: if MASTER flag set, this points to a bridge struct
218 * @port: if MASTER flag unset, this points to a port struct
219 * @refcnt: if MASTER flag set, this is bumped for each port referencing it
220 * @brvlan: if MASTER flag unset, this points to the global per-VLAN context
221 * for this VLAN entry
222+ * @br_mcast_ctx: if MASTER flag set, this is the global vlan multicast context
223+ * @port_mcast_ctx: if MASTER flag unset, this is the per-port/vlan multicast
224+ * context
225 * @vlist: sorted list of VLAN entries
226 * @rcu: used for entry destruction
227 *
228@@ -133,7 +193,8 @@ struct net_bridge_vlan {
229 u16 vid;
230 u16 flags;
231 u16 priv_flags;
232- struct br_vlan_stats __percpu *stats;
233+ u8 state;
234+ struct pcpu_sw_netstats __percpu *stats;
235 union {
236 struct net_bridge *br;
237 struct net_bridge_port *port;
238@@ -145,6 +206,11 @@ struct net_bridge_vlan {
239
240 struct br_tunnel_info tinfo;
241
242+ union {
243+ struct net_bridge_mcast br_mcast_ctx;
244+ struct net_bridge_mcast_port port_mcast_ctx;
245+ };
246+
247 struct list_head vlist;
248
249 struct rcu_head rcu;
250@@ -170,6 +236,7 @@ struct net_bridge_vlan_group {
251 struct list_head vlan_list;
252 u16 num_vlans;
253 u16 pvid;
254+ u8 pvid_state;
255 };
256
257 struct net_bridge_fdb_key {
258@@ -497,6 +564,67 @@ static inline bool br_vlan_should_use(const struct net_bridge_vlan *v)
259 return true;
260 }
261
262+static inline bool br_vlan_valid_id(u16 vid, struct netlink_ext_ack *extack)
263+{
264+ bool ret = vid > 0 && vid < VLAN_VID_MASK;
265+
266+ if (!ret)
267+ NL_SET_ERR_MSG_MOD(extack, "Vlan id is invalid");
268+
269+ return ret;
270+}
271+
272+static inline bool br_vlan_valid_range(const struct bridge_vlan_info *cur,
273+ const struct bridge_vlan_info *last,
274+ struct netlink_ext_ack *extack)
275+{
276+ /* pvid flag is not allowed in ranges */
277+ if (cur->flags & BRIDGE_VLAN_INFO_PVID) {
278+ NL_SET_ERR_MSG_MOD(extack, "Pvid isn't allowed in a range");
279+ return false;
280+ }
281+
282+ /* when cur is the range end, check if:
283+ * - it has range start flag
284+ * - range ids are invalid (end is equal to or before start)
285+ */
286+ if (last) {
287+ if (cur->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
288+ NL_SET_ERR_MSG_MOD(extack, "Found a new vlan range start while processing one");
289+ return false;
290+ } else if (!(cur->flags & BRIDGE_VLAN_INFO_RANGE_END)) {
291+ NL_SET_ERR_MSG_MOD(extack, "Vlan range end flag is missing");
292+ return false;
293+ } else if (cur->vid <= last->vid) {
294+ NL_SET_ERR_MSG_MOD(extack, "End vlan id is less than or equal to start vlan id");
295+ return false;
296+ }
297+ }
298+
299+ /* check for required range flags */
300+ if (!(cur->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
301+ BRIDGE_VLAN_INFO_RANGE_END))) {
302+ NL_SET_ERR_MSG_MOD(extack, "Both vlan range flags are missing");
303+ return false;
304+ }
305+
306+ return true;
307+}
308+
309+static inline u8 br_vlan_multicast_router(const struct net_bridge_vlan *v)
310+{
311+ u8 mcast_router = MDB_RTR_TYPE_DISABLED;
312+
313+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
314+ if (!br_vlan_is_master(v))
315+ mcast_router = v->port_mcast_ctx.multicast_router;
316+ else
317+ mcast_router = v->br_mcast_ctx.multicast_router;
318+#endif
319+
320+ return mcast_router;
321+}
322+
323 static inline int br_opt_get(const struct net_bridge *br,
324 enum net_bridge_opts opt)
325 {
326@@ -676,8 +804,10 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
327 struct sk_buff *skb, bool local_rcv, bool local_orig);
328 int br_multicast_set_router(struct net_bridge *br, unsigned long val);
329 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
330+int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router);
331 int br_multicast_toggle(struct net_bridge *br, unsigned long val);
332 int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
333+
334 int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
335 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val);
336 #if IS_ENABLED(CONFIG_IPV6)
337@@ -708,6 +838,17 @@ void br_mdb_init(void);
338 void br_mdb_uninit(void);
339 void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify);
340 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify);
341+int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
342+ struct net_device *dev);
343+int br_multicast_dump_querier_state(struct sk_buff *skb,
344+ const struct net_bridge_mcast *brmctx,
345+ int nest_attr);
346+size_t br_multicast_querier_state_size(void);
347+size_t br_rports_size(const struct net_bridge_mcast *brmctx);
348+void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
349+ unsigned long val);
350+void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
351+ unsigned long val);
352
353 #define mlock_dereference(X, br) \
354 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
355@@ -760,6 +901,49 @@ static inline int br_multicast_igmp_type(const struct sk_buff *skb)
356 {
357 return BR_INPUT_SKB_CB(skb)->igmp;
358 }
359+static inline bool
360+br_rports_have_mc_router(const struct net_bridge_mcast *brmctx)
361+{
362+#if IS_ENABLED(CONFIG_IPV6)
363+ return !hlist_empty(&brmctx->ip4_mc_router_list) ||
364+ !hlist_empty(&brmctx->ip6_mc_router_list);
365+#else
366+ return !hlist_empty(&brmctx->ip4_mc_router_list);
367+#endif
368+}
369+
370+static inline bool
371+br_multicast_ctx_options_equal(const struct net_bridge_mcast *brmctx1,
372+ const struct net_bridge_mcast *brmctx2)
373+{
374+ return brmctx1->multicast_igmp_version ==
375+ brmctx2->multicast_igmp_version &&
376+ brmctx1->multicast_last_member_count ==
377+ brmctx2->multicast_last_member_count &&
378+ brmctx1->multicast_startup_query_count ==
379+ brmctx2->multicast_startup_query_count &&
380+ brmctx1->multicast_last_member_interval ==
381+ brmctx2->multicast_last_member_interval &&
382+ brmctx1->multicast_membership_interval ==
383+ brmctx2->multicast_membership_interval &&
384+ brmctx1->multicast_querier_interval ==
385+ brmctx2->multicast_querier_interval &&
386+ brmctx1->multicast_query_interval ==
387+ brmctx2->multicast_query_interval &&
388+ brmctx1->multicast_query_response_interval ==
389+ brmctx2->multicast_query_response_interval &&
390+ brmctx1->multicast_startup_query_interval ==
391+ brmctx2->multicast_startup_query_interval &&
392+ brmctx1->multicast_querier == brmctx2->multicast_querier &&
393+ brmctx1->multicast_router == brmctx2->multicast_router &&
394+ !br_rports_have_mc_router(brmctx1) &&
395+ !br_rports_have_mc_router(brmctx2) &&
396+#if IS_ENABLED(CONFIG_IPV6)
397+ brmctx1->multicast_mld_version ==
398+ brmctx2->multicast_mld_version &&
399+#endif
400+ true;
401+}
402 #else
403 static inline int br_multicast_rcv(struct net_bridge *br,
404 struct net_bridge_port *port,
405@@ -907,10 +1091,21 @@ void nbp_vlan_flush(struct net_bridge_port *port);
406 int nbp_vlan_init(struct net_bridge_port *port, struct netlink_ext_ack *extack);
407 int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
408 void br_vlan_get_stats(const struct net_bridge_vlan *v,
409- struct br_vlan_stats *stats);
410+ struct pcpu_sw_netstats *stats);
411 void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
412 int br_vlan_bridge_event(struct net_device *dev, unsigned long event,
413 void *ptr);
414+void br_vlan_rtnl_init(void);
415+void br_vlan_rtnl_uninit(void);
416+void br_vlan_notify(const struct net_bridge *br,
417+ const struct net_bridge_port *p,
418+ u16 vid, u16 vid_range,
419+ int cmd);
420+int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
421+ const void *ctx, bool adding, struct notifier_block *nb,
422+ struct netlink_ext_ack *extack);
423+bool br_vlan_can_enter_range(struct net_bridge_vlan *v_curr,
424+ struct net_bridge_vlan *range_end);
425
426 void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
427 struct net_device_path_ctx *ctx,
428@@ -969,6 +1164,10 @@ static inline u16 br_get_pvid(const struct net_bridge_vlan_group *vg)
429 return vg->pvid;
430 }
431
432+static inline u16 br_vlan_flags(const struct net_bridge_vlan *v, u16 pvid)
433+{
434+ return v->vid == pvid ? v->flags | BRIDGE_VLAN_INFO_PVID : v->flags;
435+}
436 #else
437 static inline bool br_allowed_ingress(const struct net_bridge *br,
438 struct net_bridge_vlan_group *vg,
439@@ -1111,7 +1310,7 @@ static inline struct net_bridge_vlan_group *nbp_vlan_group_rcu(
440 }
441
442 static inline void br_vlan_get_stats(const struct net_bridge_vlan *v,
443- struct br_vlan_stats *stats)
444+ struct pcpu_sw_netstats *stats)
445 {
446 }
447
448@@ -1125,6 +1324,88 @@ static inline int br_vlan_bridge_event(struct net_device *dev,
449 {
450 return 0;
451 }
452+
453+static inline void br_vlan_rtnl_init(void)
454+{
455+}
456+
457+static inline void br_vlan_rtnl_uninit(void)
458+{
459+}
460+
461+static inline void br_vlan_notify(const struct net_bridge *br,
462+ const struct net_bridge_port *p,
463+ u16 vid, u16 vid_range,
464+ int cmd)
465+{
466+}
467+
468+static inline bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
469+ const struct net_bridge_vlan *range_end)
470+{
471+ return true;
472+}
473+
474+static inline int br_vlan_replay(struct net_device *br_dev,
475+ struct net_device *dev, const void *ctx,
476+ bool adding, struct notifier_block *nb,
477+ struct netlink_ext_ack *extack)
478+{
479+ return -EOPNOTSUPP;
480+}
481+#endif
482+
483+/* br_vlan_options.c */
484+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
485+bool br_vlan_opts_eq_range(struct net_bridge_vlan *v_curr,
486+ struct net_bridge_vlan *range_end);
487+bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v);
488+size_t br_vlan_opts_nl_size(void);
489+int br_vlan_process_options(const struct net_bridge *br,
490+ struct net_bridge_port *p,
491+ struct net_bridge_vlan *range_start,
492+ struct net_bridge_vlan *range_end,
493+ struct nlattr **tb,
494+ struct netlink_ext_ack *extack);
495+bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
496+ const struct net_bridge_vlan *r_end);
497+bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
498+ const struct net_bridge_vlan *v_opts);
499+
500+/* vlan state manipulation helpers using *_ONCE to annotate lock-free access */
501+static inline u8 br_vlan_get_state(const struct net_bridge_vlan *v)
502+{
503+ return READ_ONCE(v->state);
504+}
505+
506+static inline void br_vlan_set_state(struct net_bridge_vlan *v, u8 state)
507+{
508+ WRITE_ONCE(v->state, state);
509+}
510+
511+static inline u8 br_vlan_get_pvid_state(const struct net_bridge_vlan_group *vg)
512+{
513+ return READ_ONCE(vg->pvid_state);
514+}
515+
516+static inline void br_vlan_set_pvid_state(struct net_bridge_vlan_group *vg,
517+ u8 state)
518+{
519+ WRITE_ONCE(vg->pvid_state, state);
520+}
521+
522+/* learn_allow is true at ingress and false at egress */
523+static inline bool br_vlan_state_allowed(u8 state, bool learn_allow)
524+{
525+ switch (state) {
526+ case BR_STATE_LEARNING:
527+ return learn_allow;
528+ case BR_STATE_FORWARDING:
529+ return true;
530+ default:
531+ return false;
532+ }
533+}
534 #endif
535
536 struct nf_br_ops {
537@@ -1196,6 +1477,12 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags,
538 int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
539 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
540 u32 filter_mask, int nlflags);
541+int br_process_vlan_info(struct net_bridge *br,
542+ struct net_bridge_port *p, int cmd,
543+ struct bridge_vlan_info *vinfo_curr,
544+ struct bridge_vlan_info **vinfo_last,
545+ bool *changed,
546+ struct netlink_ext_ack *extack);
547
548 #ifdef CONFIG_SYSFS
549 /* br_sysfs_if.c */
550diff --git a/net/bridge/br_private_tunnel.h b/net/bridge/br_private_tunnel.h
551index 2bdef2e..25be963 100644
552--- a/net/bridge/br_private_tunnel.h
553+++ b/net/bridge/br_private_tunnel.h
554@@ -42,6 +42,10 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
555 struct net_bridge_vlan_group *vg);
556 int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
557 struct net_bridge_vlan *vlan);
558+bool vlan_tunid_inrange(struct net_bridge_vlan *v_curr,
559+ struct net_bridge_vlan *v_last);
560+int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
561+ u16 vid, u32 tun_id, bool *changed);
562 #else
563 static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
564 {
565diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
566index bcfd169..2b5950c 100644
567--- a/net/bridge/br_vlan.c
568+++ b/net/bridge/br_vlan.c
569@@ -34,13 +34,15 @@ static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
570 return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
571 }
572
573-static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
574+static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
575+ const struct net_bridge_vlan *v)
576 {
577- if (vg->pvid == vid)
578+ if (vg->pvid == v->vid)
579 return false;
580
581 smp_wmb();
582- vg->pvid = vid;
583+ br_vlan_set_pvid_state(vg, v->state);
584+ vg->pvid = v->vid;
585
586 return true;
587 }
588@@ -69,7 +71,7 @@ static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
589 vg = nbp_vlan_group(v->port);
590
591 if (flags & BRIDGE_VLAN_INFO_PVID)
592- ret = __vlan_add_pvid(vg, v->vid);
593+ ret = __vlan_add_pvid(vg, v);
594 else
595 ret = __vlan_delete_pvid(vg, v->vid);
596
597@@ -257,6 +259,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
598 &changed, extack);
599 if (err)
600 goto out_filt;
601+
602+ if (changed)
603+ br_vlan_notify(br, NULL, v->vid, 0,
604+ RTM_NEWVLAN);
605 }
606
607 masterv = br_vlan_get_master(br, v->vid, extack);
608@@ -266,7 +272,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
609 }
610 v->brvlan = masterv;
611 if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
612- v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
613+ v->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
614 if (!v->stats) {
615 err = -ENOMEM;
616 goto out_filt;
617@@ -382,13 +388,31 @@ static void __vlan_group_free(struct net_bridge_vlan_group *vg)
618 kfree(vg);
619 }
620
621-static void __vlan_flush(struct net_bridge_vlan_group *vg)
622+static void __vlan_flush(const struct net_bridge *br,
623+ const struct net_bridge_port *p,
624+ struct net_bridge_vlan_group *vg)
625 {
626 struct net_bridge_vlan *vlan, *tmp;
627+ u16 v_start = 0, v_end = 0;
628
629 __vlan_delete_pvid(vg, vg->pvid);
630- list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
631+ list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
632+ /* take care of disjoint ranges */
633+ if (!v_start) {
634+ v_start = vlan->vid;
635+ } else if (vlan->vid - v_end != 1) {
636+ /* found range end, notify and start next one */
637+ br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
638+ v_start = vlan->vid;
639+ }
640+ v_end = vlan->vid;
641+
642 __vlan_del(vlan);
643+ }
644+
645+ /* notify about the last/whole vlan range */
646+ if (v_start)
647+ br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
648 }
649
650 struct sk_buff *br_handle_vlan(struct net_bridge *br,
651@@ -396,7 +420,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
652 struct net_bridge_vlan_group *vg,
653 struct sk_buff *skb)
654 {
655- struct br_vlan_stats *stats;
656+ struct pcpu_sw_netstats *stats;
657 struct net_bridge_vlan *v;
658 u16 vid;
659
660@@ -448,7 +472,7 @@ static bool __allowed_ingress(const struct net_bridge *br,
661 struct net_bridge_vlan_group *vg,
662 struct sk_buff *skb, u16 *vid)
663 {
664- struct br_vlan_stats *stats;
665+ struct pcpu_sw_netstats *stats;
666 struct net_bridge_vlan *v;
667 bool tagged;
668
669@@ -666,7 +690,7 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
670 if (!vlan)
671 return -ENOMEM;
672
673- vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
674+ vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
675 if (!vlan->stats) {
676 kfree(vlan);
677 return -ENOMEM;
678@@ -718,7 +742,7 @@ void br_vlan_flush(struct net_bridge *br)
679 ASSERT_RTNL();
680
681 vg = br_vlan_group(br);
682- __vlan_flush(vg);
683+ __vlan_flush(br, NULL, vg);
684 RCU_INIT_POINTER(br->vlgrp, NULL);
685 synchronize_rcu();
686 __vlan_group_free(vg);
687@@ -927,12 +951,15 @@ static void br_vlan_disable_default_pvid(struct net_bridge *br)
688 /* Disable default_pvid on all ports where it is still
689 * configured.
690 */
691- if (vlan_default_pvid(br_vlan_group(br), pvid))
692- br_vlan_delete(br, pvid);
693+ if (vlan_default_pvid(br_vlan_group(br), pvid)) {
694+ if (!br_vlan_delete(br, pvid))
695+ br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
696+ }
697
698 list_for_each_entry(p, &br->port_list, list) {
699- if (vlan_default_pvid(nbp_vlan_group(p), pvid))
700- nbp_vlan_delete(p, pvid);
701+ if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
702+ !nbp_vlan_delete(p, pvid))
703+ br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
704 }
705
706 br->default_pvid = 0;
707@@ -974,7 +1001,10 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
708 &vlchange, extack);
709 if (err)
710 goto out;
711- br_vlan_delete(br, old_pvid);
712+
713+ if (br_vlan_delete(br, old_pvid))
714+ br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
715+ br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
716 set_bit(0, changed);
717 }
718
719@@ -994,7 +1024,9 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
720 &vlchange, extack);
721 if (err)
722 goto err_port;
723- nbp_vlan_delete(p, old_pvid);
724+ if (nbp_vlan_delete(p, old_pvid))
725+ br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
726+ br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
727 set_bit(p->port_no, changed);
728 }
729
730@@ -1009,22 +1041,28 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
731 if (!test_bit(p->port_no, changed))
732 continue;
733
734- if (old_pvid)
735+ if (old_pvid) {
736 nbp_vlan_add(p, old_pvid,
737 BRIDGE_VLAN_INFO_PVID |
738 BRIDGE_VLAN_INFO_UNTAGGED,
739 &vlchange, NULL);
740+ br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
741+ }
742 nbp_vlan_delete(p, pvid);
743+ br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
744 }
745
746 if (test_bit(0, changed)) {
747- if (old_pvid)
748+ if (old_pvid) {
749 br_vlan_add(br, old_pvid,
750 BRIDGE_VLAN_INFO_PVID |
751 BRIDGE_VLAN_INFO_UNTAGGED |
752 BRIDGE_VLAN_INFO_BRENTRY,
753 &vlchange, NULL);
754+ br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
755+ }
756 br_vlan_delete(br, pvid);
757+ br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
758 }
759 goto out;
760 }
761@@ -1117,6 +1155,7 @@ int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
762 &changed, extack);
763 if (ret)
764 goto err_vlan_add;
765+ br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
766 }
767 out:
768 return ret;
769@@ -1198,21 +1237,21 @@ void nbp_vlan_flush(struct net_bridge_port *port)
770 ASSERT_RTNL();
771
772 vg = nbp_vlan_group(port);
773- __vlan_flush(vg);
774+ __vlan_flush(port->br, port, vg);
775 RCU_INIT_POINTER(port->vlgrp, NULL);
776 synchronize_rcu();
777 __vlan_group_free(vg);
778 }
779
780 void br_vlan_get_stats(const struct net_bridge_vlan *v,
781- struct br_vlan_stats *stats)
782+ struct pcpu_sw_netstats *stats)
783 {
784 int i;
785
786 memset(stats, 0, sizeof(*stats));
787 for_each_possible_cpu(i) {
788 u64 rxpackets, rxbytes, txpackets, txbytes;
789- struct br_vlan_stats *cpu_stats;
790+ struct pcpu_sw_netstats *cpu_stats;
791 unsigned int start;
792
793 cpu_stats = per_cpu_ptr(v->stats, i);
794@@ -1526,8 +1565,8 @@ int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
795 {
796 struct netdev_notifier_changeupper_info *info;
797 struct net_bridge *br = netdev_priv(dev);
798- bool changed;
799- int ret = 0;
800+ int vlcmd = 0, ret = 0;
801+ bool changed = false;
802
803 switch (event) {
804 case NETDEV_REGISTER:
805@@ -1535,9 +1574,11 @@ int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
806 BRIDGE_VLAN_INFO_PVID |
807 BRIDGE_VLAN_INFO_UNTAGGED |
808 BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
809+ vlcmd = RTM_NEWVLAN;
810 break;
811 case NETDEV_UNREGISTER:
812- br_vlan_delete(br, br->default_pvid);
813+ changed = !br_vlan_delete(br, br->default_pvid);
814+ vlcmd = RTM_DELVLAN;
815 break;
816 case NETDEV_CHANGEUPPER:
817 info = ptr;
818@@ -1551,6 +1592,8 @@ int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
819 br_vlan_link_state_change(dev, br);
820 break;
821 }
822+ if (changed)
823+ br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
824
825 return ret;
826 }
827@@ -1569,3 +1612,608 @@ void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
828 break;
829 }
830 }
831+
832+static bool br_vlan_stats_fill(struct sk_buff *skb,
833+ const struct net_bridge_vlan *v)
834+{
835+ struct pcpu_sw_netstats stats;
836+ struct nlattr *nest;
837+
838+ nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
839+ if (!nest)
840+ return false;
841+
842+ br_vlan_get_stats(v, &stats);
843+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
844+ BRIDGE_VLANDB_STATS_PAD) ||
845+ nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
846+ stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
847+ nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
848+ BRIDGE_VLANDB_STATS_PAD) ||
849+ nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
850+ stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
851+ goto out_err;
852+
853+ nla_nest_end(skb, nest);
854+
855+ return true;
856+
857+out_err:
858+ nla_nest_cancel(skb, nest);
859+ return false;
860+}
861+
862+/* v_opts is used to dump the options which must be equal in the whole range */
863+static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
864+ const struct net_bridge_vlan *v_opts,
865+ u16 flags,
866+ bool dump_stats)
867+{
868+ struct bridge_vlan_info info;
869+ struct nlattr *nest;
870+
871+ nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
872+ if (!nest)
873+ return false;
874+
875+ memset(&info, 0, sizeof(info));
876+ info.vid = vid;
877+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
878+ info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
879+ if (flags & BRIDGE_VLAN_INFO_PVID)
880+ info.flags |= BRIDGE_VLAN_INFO_PVID;
881+
882+ if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
883+ goto out_err;
884+
885+ if (vid_range && vid < vid_range &&
886+ !(flags & BRIDGE_VLAN_INFO_PVID) &&
887+ nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
888+ goto out_err;
889+
890+ if (v_opts) {
891+ if (!br_vlan_opts_fill(skb, v_opts))
892+ goto out_err;
893+
894+ if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
895+ goto out_err;
896+ }
897+
898+ nla_nest_end(skb, nest);
899+
900+ return true;
901+
902+out_err:
903+ nla_nest_cancel(skb, nest);
904+ return false;
905+}
906+
907+static size_t rtnl_vlan_nlmsg_size(void)
908+{
909+ return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
910+ + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
911+ + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
912+ + nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
913+ + br_vlan_opts_nl_size(); /* bridge vlan options */
914+}
915+
916+void br_vlan_notify(const struct net_bridge *br,
917+ const struct net_bridge_port *p,
918+ u16 vid, u16 vid_range,
919+ int cmd)
920+{
921+ struct net_bridge_vlan_group *vg;
922+ struct net_bridge_vlan *v = NULL;
923+ struct br_vlan_msg *bvm;
924+ struct nlmsghdr *nlh;
925+ struct sk_buff *skb;
926+ int err = -ENOBUFS;
927+ struct net *net;
928+ u16 flags = 0;
929+ int ifindex;
930+
931+ /* right now notifications are done only with rtnl held */
932+ ASSERT_RTNL();
933+
934+ if (p) {
935+ ifindex = p->dev->ifindex;
936+ vg = nbp_vlan_group(p);
937+ net = dev_net(p->dev);
938+ } else {
939+ ifindex = br->dev->ifindex;
940+ vg = br_vlan_group(br);
941+ net = dev_net(br->dev);
942+ }
943+
944+ skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
945+ if (!skb)
946+ goto out_err;
947+
948+ err = -EMSGSIZE;
949+ nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
950+ if (!nlh)
951+ goto out_err;
952+ bvm = nlmsg_data(nlh);
953+ memset(bvm, 0, sizeof(*bvm));
954+ bvm->family = AF_BRIDGE;
955+ bvm->ifindex = ifindex;
956+
957+ switch (cmd) {
958+ case RTM_NEWVLAN:
959+ /* need to find the vlan due to flags/options */
960+ v = br_vlan_find(vg, vid);
961+ if (!v || !br_vlan_should_use(v))
962+ goto out_kfree;
963+
964+ flags = v->flags;
965+ if (br_get_pvid(vg) == v->vid)
966+ flags |= BRIDGE_VLAN_INFO_PVID;
967+ break;
968+ case RTM_DELVLAN:
969+ break;
970+ default:
971+ goto out_kfree;
972+ }
973+
974+ if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
975+ goto out_err;
976+
977+ nlmsg_end(skb, nlh);
978+ rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
979+ return;
980+
981+out_err:
982+ rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
983+out_kfree:
984+ kfree_skb(skb);
985+}
986+
987+static int br_vlan_replay_one(struct notifier_block *nb,
988+ struct net_device *dev,
989+ struct switchdev_obj_port_vlan *vlan,
990+ const void *ctx, unsigned long action,
991+ struct netlink_ext_ack *extack)
992+{
993+ struct switchdev_notifier_port_obj_info obj_info = {
994+ .info = {
995+ .dev = dev,
996+ .extack = extack,
997+ .ctx = ctx,
998+ },
999+ .obj = &vlan->obj,
1000+ };
1001+ int err;
1002+
1003+ err = nb->notifier_call(nb, action, &obj_info);
1004+ return notifier_to_errno(err);
1005+}
1006+
1007+int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
1008+ const void *ctx, bool adding, struct notifier_block *nb,
1009+ struct netlink_ext_ack *extack)
1010+{
1011+ struct net_bridge_vlan_group *vg;
1012+ struct net_bridge_vlan *v;
1013+ struct net_bridge_port *p;
1014+ struct net_bridge *br;
1015+ unsigned long action;
1016+ int err = 0;
1017+ u16 pvid;
1018+
1019+ ASSERT_RTNL();
1020+
1021+ if (!nb)
1022+ return 0;
1023+
1024+ if (!netif_is_bridge_master(br_dev))
1025+ return -EINVAL;
1026+
1027+ if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1028+ return -EINVAL;
1029+
1030+ if (netif_is_bridge_master(dev)) {
1031+ br = netdev_priv(dev);
1032+ vg = br_vlan_group(br);
1033+ p = NULL;
1034+ } else {
1035+ p = br_port_get_rtnl(dev);
1036+ if (WARN_ON(!p))
1037+ return -EINVAL;
1038+ vg = nbp_vlan_group(p);
1039+ br = p->br;
1040+ }
1041+
1042+ if (!vg)
1043+ return 0;
1044+
1045+ if (adding)
1046+ action = SWITCHDEV_PORT_OBJ_ADD;
1047+ else
1048+ action = SWITCHDEV_PORT_OBJ_DEL;
1049+
1050+ pvid = br_get_pvid(vg);
1051+
1052+ list_for_each_entry(v, &vg->vlan_list, vlist) {
1053+ struct switchdev_obj_port_vlan vlan = {
1054+ .obj.orig_dev = dev,
1055+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1056+ .flags = br_vlan_flags(v, pvid),
1057+ .vid = v->vid,
1058+ };
1059+
1060+ if (!br_vlan_should_use(v))
1061+ continue;
1062+
1063+ err = br_vlan_replay_one(nb, dev, &vlan, ctx, action, extack);
1064+ if (err)
1065+ return err;
1066+ }
1067+
1068+ return err;
1069+}
1070+
1071+/* check if v_curr can enter a range ending in range_end */
1072+bool br_vlan_can_enter_range(struct net_bridge_vlan *v_curr,
1073+ struct net_bridge_vlan *range_end)
1074+{
1075+ return v_curr->vid - range_end->vid == 1 &&
1076+ range_end->flags == v_curr->flags &&
1077+ br_vlan_opts_eq_range(v_curr, range_end);
1078+}
1079+
1080+static int br_vlan_dump_dev(const struct net_device *dev,
1081+ struct sk_buff *skb,
1082+ struct netlink_callback *cb,
1083+ u32 dump_flags)
1084+{
1085+ struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
1086+ bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
1087+ bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
1088+ struct net_bridge_vlan_group *vg;
1089+ int idx = 0, s_idx = cb->args[1];
1090+ struct nlmsghdr *nlh = NULL;
1091+ struct net_bridge_port *p;
1092+ struct br_vlan_msg *bvm;
1093+ struct net_bridge *br;
1094+ int err = 0;
1095+ u16 pvid;
1096+
1097+ if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1098+ return -EINVAL;
1099+
1100+ if (netif_is_bridge_master(dev)) {
1101+ br = netdev_priv(dev);
1102+ vg = br_vlan_group_rcu(br);
1103+ p = NULL;
1104+ } else {
1105+ /* global options are dumped only for bridge devices */
1106+ if (dump_global)
1107+ return 0;
1108+
1109+ p = br_port_get_rcu(dev);
1110+ if (WARN_ON(!p))
1111+ return -EINVAL;
1112+ vg = nbp_vlan_group_rcu(p);
1113+ br = p->br;
1114+ }
1115+
1116+ if (!vg)
1117+ return 0;
1118+
1119+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1120+ RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
1121+ if (!nlh)
1122+ return -EMSGSIZE;
1123+ bvm = nlmsg_data(nlh);
1124+ memset(bvm, 0, sizeof(*bvm));
1125+ bvm->family = PF_BRIDGE;
1126+ bvm->ifindex = dev->ifindex;
1127+ pvid = br_get_pvid(vg);
1128+
1129+ /* idx must stay at range's beginning until it is filled in */
1130+ list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
1131+ if (!dump_global && !br_vlan_should_use(v))
1132+ continue;
1133+ if (idx < s_idx) {
1134+ idx++;
1135+ continue;
1136+ }
1137+
1138+ if (!range_start) {
1139+ range_start = v;
1140+ range_end = v;
1141+ continue;
1142+ }
1143+
1144+ if (dump_global) {
1145+ if (br_vlan_global_opts_can_enter_range(v, range_end))
1146+ goto update_end;
1147+ if (!br_vlan_global_opts_fill(skb, range_start->vid,
1148+ range_end->vid,
1149+ range_start)) {
1150+ err = -EMSGSIZE;
1151+ break;
1152+ }
1153+ /* advance number of filled vlans */
1154+ idx += range_end->vid - range_start->vid + 1;
1155+
1156+ range_start = v;
1157+ } else if (dump_stats || v->vid == pvid ||
1158+ !br_vlan_can_enter_range(v, range_end)) {
1159+ u16 vlan_flags = br_vlan_flags(range_start, pvid);
1160+
1161+ if (!br_vlan_fill_vids(skb, range_start->vid,
1162+ range_end->vid, range_start,
1163+ vlan_flags, dump_stats)) {
1164+ err = -EMSGSIZE;
1165+ break;
1166+ }
1167+ /* advance number of filled vlans */
1168+ idx += range_end->vid - range_start->vid + 1;
1169+
1170+ range_start = v;
1171+ }
1172+update_end:
1173+ range_end = v;
1174+ }
1175+
1176+ /* err will be 0 and range_start will be set in 3 cases here:
1177+ * - first vlan (range_start == range_end)
1178+ * - last vlan (range_start == range_end, not in range)
1179+ * - last vlan range (range_start != range_end, in range)
1180+ */
1181+ if (!err && range_start) {
1182+ if (dump_global &&
1183+ !br_vlan_global_opts_fill(skb, range_start->vid,
1184+ range_end->vid, range_start))
1185+ err = -EMSGSIZE;
1186+ else if (!dump_global &&
1187+ !br_vlan_fill_vids(skb, range_start->vid,
1188+ range_end->vid, range_start,
1189+ br_vlan_flags(range_start, pvid),
1190+ dump_stats))
1191+ err = -EMSGSIZE;
1192+ }
1193+
1194+ cb->args[1] = err ? idx : 0;
1195+
1196+ nlmsg_end(skb, nlh);
1197+
1198+ return err;
1199+}
1200+
1201+static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
1202+ [BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
1203+};
1204+
1205+static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
1206+{
1207+ struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
1208+ int idx = 0, err = 0, s_idx = cb->args[0];
1209+ struct net *net = sock_net(skb->sk);
1210+ struct br_vlan_msg *bvm;
1211+ struct net_device *dev;
1212+ u32 dump_flags = 0;
1213+
1214+ err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
1215+ br_vlan_db_dump_pol, cb->extack);
1216+ if (err < 0)
1217+ return err;
1218+
1219+ bvm = nlmsg_data(cb->nlh);
1220+ if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
1221+ dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
1222+
1223+ rcu_read_lock();
1224+ if (bvm->ifindex) {
1225+ dev = dev_get_by_index_rcu(net, bvm->ifindex);
1226+ if (!dev) {
1227+ err = -ENODEV;
1228+ goto out_err;
1229+ }
1230+ err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
1231+ /* if the dump completed without an error we return 0 here */
1232+ if (err != -EMSGSIZE)
1233+ goto out_err;
1234+ } else {
1235+ for_each_netdev_rcu(net, dev) {
1236+ if (idx < s_idx)
1237+ goto skip;
1238+
1239+ err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
1240+ if (err == -EMSGSIZE)
1241+ break;
1242+skip:
1243+ idx++;
1244+ }
1245+ }
1246+ cb->args[0] = idx;
1247+ rcu_read_unlock();
1248+
1249+ return skb->len;
1250+
1251+out_err:
1252+ rcu_read_unlock();
1253+
1254+ return err;
1255+}
1256+
1257+static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
1258+ [BRIDGE_VLANDB_ENTRY_INFO] =
1259+ NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
1260+ [BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
1261+ [BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
1262+ [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
1263+ [BRIDGE_VLANDB_ENTRY_MCAST_ROUTER] = { .type = NLA_U8 },
1264+};
1265+
1266+static int br_vlan_rtm_process_one(struct net_device *dev,
1267+ const struct nlattr *attr,
1268+ int cmd, struct netlink_ext_ack *extack)
1269+{
1270+ struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
1271+ struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
1272+ bool changed = false, skip_processing = false;
1273+ struct net_bridge_vlan_group *vg;
1274+ struct net_bridge_port *p = NULL;
1275+ int err = 0, cmdmap = 0;
1276+ struct net_bridge *br;
1277+
1278+ if (netif_is_bridge_master(dev)) {
1279+ br = netdev_priv(dev);
1280+ vg = br_vlan_group(br);
1281+ } else {
1282+ p = br_port_get_rtnl(dev);
1283+ if (WARN_ON(!p))
1284+ return -ENODEV;
1285+ br = p->br;
1286+ vg = nbp_vlan_group(p);
1287+ }
1288+
1289+ if (WARN_ON(!vg))
1290+ return -ENODEV;
1291+
1292+ err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
1293+ br_vlan_db_policy, extack);
1294+ if (err)
1295+ return err;
1296+
1297+ if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
1298+ NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
1299+ return -EINVAL;
1300+ }
1301+ memset(&vrange_end, 0, sizeof(vrange_end));
1302+
1303+ vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
1304+ if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
1305+ BRIDGE_VLAN_INFO_RANGE_END)) {
1306+ NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
1307+ return -EINVAL;
1308+ }
1309+ if (!br_vlan_valid_id(vinfo->vid, extack))
1310+ return -EINVAL;
1311+
1312+ if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
1313+ vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
1314+ /* validate user-provided flags without RANGE_BEGIN */
1315+ vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
1316+ vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
1317+
1318+ /* vinfo_last is the range start, vinfo the range end */
1319+ vinfo_last = vinfo;
1320+ vinfo = &vrange_end;
1321+
1322+ if (!br_vlan_valid_id(vinfo->vid, extack) ||
1323+ !br_vlan_valid_range(vinfo, vinfo_last, extack))
1324+ return -EINVAL;
1325+ }
1326+
1327+ switch (cmd) {
1328+ case RTM_NEWVLAN:
1329+ cmdmap = RTM_SETLINK;
1330+ skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
1331+ break;
1332+ case RTM_DELVLAN:
1333+ cmdmap = RTM_DELLINK;
1334+ break;
1335+ }
1336+
1337+ if (!skip_processing) {
1338+ struct bridge_vlan_info *tmp_last = vinfo_last;
1339+
1340+ /* br_process_vlan_info may overwrite vinfo_last */
1341+ err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
1342+ &changed, extack);
1343+
1344+ /* notify first if anything changed */
1345+ if (changed)
1346+ br_ifinfo_notify(cmdmap, br, p);
1347+
1348+ if (err)
1349+ return err;
1350+ }
1351+
1352+ /* deal with options */
1353+ if (cmd == RTM_NEWVLAN) {
1354+ struct net_bridge_vlan *range_start, *range_end;
1355+
1356+ if (vinfo_last) {
1357+ range_start = br_vlan_find(vg, vinfo_last->vid);
1358+ range_end = br_vlan_find(vg, vinfo->vid);
1359+ } else {
1360+ range_start = br_vlan_find(vg, vinfo->vid);
1361+ range_end = range_start;
1362+ }
1363+
1364+ err = br_vlan_process_options(br, p, range_start, range_end,
1365+ tb, extack);
1366+ }
1367+
1368+ return err;
1369+}
1370+
1371+static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
1372+ struct netlink_ext_ack *extack)
1373+{
1374+ struct net *net = sock_net(skb->sk);
1375+ struct br_vlan_msg *bvm;
1376+ struct net_device *dev;
1377+ struct nlattr *attr;
1378+ int err, vlans = 0;
1379+ int rem;
1380+
1381+ /* this should validate the header and check for remaining bytes */
1382+ err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
1383+ extack);
1384+ if (err < 0)
1385+ return err;
1386+
1387+ bvm = nlmsg_data(nlh);
1388+ dev = __dev_get_by_index(net, bvm->ifindex);
1389+ if (!dev)
1390+ return -ENODEV;
1391+
1392+ if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
1393+ NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
1394+ return -EINVAL;
1395+ }
1396+
1397+ nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
1398+ switch (nla_type(attr)) {
1399+ case BRIDGE_VLANDB_ENTRY:
1400+ err = br_vlan_rtm_process_one(dev, attr,
1401+ nlh->nlmsg_type,
1402+ extack);
1403+ break;
1404+ default:
1405+ continue;
1406+ }
1407+
1408+ vlans++;
1409+ if (err)
1410+ break;
1411+ }
1412+ if (!vlans) {
1413+ NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
1414+ err = -EINVAL;
1415+ }
1416+
1417+ return err;
1418+}
1419+
1420+void br_vlan_rtnl_init(void)
1421+{
1422+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
1423+ br_vlan_rtm_dump, 0);
1424+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
1425+ br_vlan_rtm_process, NULL, 0);
1426+ rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
1427+ br_vlan_rtm_process, NULL, 0);
1428+}
1429+
1430+void br_vlan_rtnl_uninit(void)
1431+{
1432+ rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
1433+ rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
1434+ rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
1435+}
1436diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
1437new file mode 100644
1438index 0000000..5e48c29
1439--- /dev/null
1440+++ b/net/bridge/br_vlan_options.c
1441@@ -0,0 +1,346 @@
1442+// SPDX-License-Identifier: GPL-2.0-only
1443+// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
1444+#include <linux/kernel.h>
1445+#include <linux/netdevice.h>
1446+#include <linux/rtnetlink.h>
1447+#include <linux/slab.h>
1448+#include <net/ip_tunnels.h>
1449+
1450+#include "br_private.h"
1451+#include "br_private_tunnel.h"
1452+
1453+static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v)
1454+{
1455+ __be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id);
1456+ struct nlattr *nest;
1457+
1458+ if (!v->tinfo.tunnel_dst)
1459+ return true;
1460+
1461+ nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO);
1462+ if (!nest)
1463+ return false;
1464+ if (nla_put_u32(skb, BRIDGE_VLANDB_TINFO_ID, be32_to_cpu(tid))) {
1465+ nla_nest_cancel(skb, nest);
1466+ return false;
1467+ }
1468+ nla_nest_end(skb, nest);
1469+
1470+ return true;
1471+}
1472+
1473+static bool __vlan_tun_can_enter_range(struct net_bridge_vlan *v_curr,
1474+ struct net_bridge_vlan *range_end)
1475+{
1476+ return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) ||
1477+ vlan_tunid_inrange(v_curr, range_end);
1478+}
1479+
1480+/* check if the options' state of v_curr allow it to enter the range */
1481+bool br_vlan_opts_eq_range(struct net_bridge_vlan *v_curr,
1482+ struct net_bridge_vlan *range_end)
1483+{
1484+ u8 range_mc_rtr = br_vlan_multicast_router(range_end);
1485+ u8 curr_mc_rtr = br_vlan_multicast_router(v_curr);
1486+
1487+ return v_curr->state == range_end->state &&
1488+ __vlan_tun_can_enter_range(v_curr, range_end) &&
1489+ curr_mc_rtr == range_mc_rtr;
1490+}
1491+
1492+bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
1493+{
1494+ if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state(v)) ||
1495+ !__vlan_tun_put(skb, v))
1496+ return false;
1497+
1498+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1499+ if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
1500+ br_vlan_multicast_router(v)))
1501+ return false;
1502+#endif
1503+
1504+ return true;
1505+}
1506+
1507+size_t br_vlan_opts_nl_size(void)
1508+{
1509+ return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */
1510+ + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY_TUNNEL_INFO */
1511+ + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_TINFO_ID */
1512+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1513+ + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_MCAST_ROUTER */
1514+#endif
1515+ + 0;
1516+}
1517+
1518+static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
1519+ struct net_bridge_vlan *v,
1520+ u8 state,
1521+ bool *changed,
1522+ struct netlink_ext_ack *extack)
1523+{
1524+ struct net_bridge *br;
1525+
1526+ ASSERT_RTNL();
1527+
1528+ if (state > BR_STATE_BLOCKING) {
1529+ NL_SET_ERR_MSG_MOD(extack, "Invalid vlan state");
1530+ return -EINVAL;
1531+ }
1532+
1533+ if (br_vlan_is_brentry(v))
1534+ br = v->br;
1535+ else
1536+ br = v->port->br;
1537+
1538+ if (br->stp_enabled == BR_KERNEL_STP) {
1539+ NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state when using kernel STP");
1540+ return -EBUSY;
1541+ }
1542+
1543+ if (v->state == state)
1544+ return 0;
1545+
1546+ if (v->vid == br_get_pvid(vg))
1547+ br_vlan_set_pvid_state(vg, state);
1548+
1549+ br_vlan_set_state(v, state);
1550+ *changed = true;
1551+
1552+ return 0;
1553+}
1554+
1555+static const struct nla_policy br_vlandb_tinfo_pol[BRIDGE_VLANDB_TINFO_MAX + 1] = {
1556+ [BRIDGE_VLANDB_TINFO_ID] = { .type = NLA_U32 },
1557+ [BRIDGE_VLANDB_TINFO_CMD] = { .type = NLA_U32 },
1558+};
1559+
1560+static int br_vlan_modify_tunnel(struct net_bridge_port *p,
1561+ struct net_bridge_vlan *v,
1562+ struct nlattr **tb,
1563+ bool *changed,
1564+ struct netlink_ext_ack *extack)
1565+{
1566+ struct nlattr *tun_tb[BRIDGE_VLANDB_TINFO_MAX + 1], *attr;
1567+ struct bridge_vlan_info *vinfo;
1568+ u32 tun_id = 0;
1569+ int cmd, err;
1570+
1571+ if (!p) {
1572+ NL_SET_ERR_MSG_MOD(extack, "Can't modify tunnel mapping of non-port vlans");
1573+ return -EINVAL;
1574+ }
1575+ if (!(p->flags & BR_VLAN_TUNNEL)) {
1576+ NL_SET_ERR_MSG_MOD(extack, "Port doesn't have tunnel flag set");
1577+ return -EINVAL;
1578+ }
1579+
1580+ attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO];
1581+ err = nla_parse_nested(tun_tb, BRIDGE_VLANDB_TINFO_MAX, attr,
1582+ br_vlandb_tinfo_pol, extack);
1583+ if (err)
1584+ return err;
1585+
1586+ if (!tun_tb[BRIDGE_VLANDB_TINFO_CMD]) {
1587+ NL_SET_ERR_MSG_MOD(extack, "Missing tunnel command attribute");
1588+ return -ENOENT;
1589+ }
1590+ cmd = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_CMD]);
1591+ switch (cmd) {
1592+ case RTM_SETLINK:
1593+ if (!tun_tb[BRIDGE_VLANDB_TINFO_ID]) {
1594+ NL_SET_ERR_MSG_MOD(extack, "Missing tunnel id attribute");
1595+ return -ENOENT;
1596+ }
1597+ /* when working on vlan ranges this is the starting tunnel id */
1598+ tun_id = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_ID]);
1599+ /* vlan info attr is guaranteed by br_vlan_rtm_process_one */
1600+ vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
1601+ /* tunnel ids are mapped to each vlan in increasing order,
1602+ * the starting vlan is in BRIDGE_VLANDB_ENTRY_INFO and v is the
1603+ * current vlan, so we compute: tun_id + v - vinfo->vid
1604+ */
1605+ tun_id += v->vid - vinfo->vid;
1606+ break;
1607+ case RTM_DELLINK:
1608+ break;
1609+ default:
1610+ NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel command");
1611+ return -EINVAL;
1612+ }
1613+
1614+ return br_vlan_tunnel_info(p, cmd, v->vid, tun_id, changed);
1615+}
1616+
1617+static int br_vlan_process_one_opts(const struct net_bridge *br,
1618+ struct net_bridge_port *p,
1619+ struct net_bridge_vlan_group *vg,
1620+ struct net_bridge_vlan *v,
1621+ struct nlattr **tb,
1622+ bool *changed,
1623+ struct netlink_ext_ack *extack)
1624+{
1625+ int err;
1626+
1627+ *changed = false;
1628+ if (tb[BRIDGE_VLANDB_ENTRY_STATE]) {
1629+ u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]);
1630+
1631+ err = br_vlan_modify_state(vg, v, state, changed, extack);
1632+ if (err)
1633+ return err;
1634+ }
1635+ if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) {
1636+ err = br_vlan_modify_tunnel(p, v, tb, changed, extack);
1637+ if (err)
1638+ return err;
1639+ }
1640+
1641+ return 0;
1642+}
1643+
1644+int br_vlan_process_options(const struct net_bridge *br,
1645+ struct net_bridge_port *p,
1646+ struct net_bridge_vlan *range_start,
1647+ struct net_bridge_vlan *range_end,
1648+ struct nlattr **tb,
1649+ struct netlink_ext_ack *extack)
1650+{
1651+ struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
1652+ struct net_bridge_vlan_group *vg;
1653+ int vid, err = 0;
1654+ u16 pvid;
1655+
1656+ if (p)
1657+ vg = nbp_vlan_group(p);
1658+ else
1659+ vg = br_vlan_group(br);
1660+
1661+ if (!range_start || !br_vlan_should_use(range_start)) {
1662+ NL_SET_ERR_MSG_MOD(extack, "Vlan range start doesn't exist, can't process options");
1663+ return -ENOENT;
1664+ }
1665+ if (!range_end || !br_vlan_should_use(range_end)) {
1666+ NL_SET_ERR_MSG_MOD(extack, "Vlan range end doesn't exist, can't process options");
1667+ return -ENOENT;
1668+ }
1669+
1670+ pvid = br_get_pvid(vg);
1671+ for (vid = range_start->vid; vid <= range_end->vid; vid++) {
1672+ bool changed = false;
1673+
1674+ v = br_vlan_find(vg, vid);
1675+ if (!v || !br_vlan_should_use(v)) {
1676+ NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process options");
1677+ err = -ENOENT;
1678+ break;
1679+ }
1680+
1681+ err = br_vlan_process_one_opts(br, p, vg, v, tb, &changed,
1682+ extack);
1683+ if (err)
1684+ break;
1685+
1686+ if (changed) {
1687+ /* vlan options changed, check for range */
1688+ if (!curr_start) {
1689+ curr_start = v;
1690+ curr_end = v;
1691+ continue;
1692+ }
1693+
1694+ if (v->vid == pvid ||
1695+ !br_vlan_can_enter_range(v, curr_end)) {
1696+ br_vlan_notify(br, p, curr_start->vid,
1697+ curr_end->vid, RTM_NEWVLAN);
1698+ curr_start = v;
1699+ }
1700+ curr_end = v;
1701+ } else {
1702+ /* nothing changed and nothing to notify yet */
1703+ if (!curr_start)
1704+ continue;
1705+
1706+ br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
1707+ RTM_NEWVLAN);
1708+ curr_start = NULL;
1709+ curr_end = NULL;
1710+ }
1711+ }
1712+ if (curr_start)
1713+ br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
1714+ RTM_NEWVLAN);
1715+
1716+ return err;
1717+}
1718+
1719+bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
1720+ const struct net_bridge_vlan *r_end)
1721+{
1722+ return v_curr->vid - r_end->vid == 1 &&
1723+ ((v_curr->priv_flags ^ r_end->priv_flags) &
1724+ BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 &&
1725+ br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx,
1726+ &r_end->br_mcast_ctx);
1727+}
1728+
1729+bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
1730+ const struct net_bridge_vlan *v_opts)
1731+{
1732+ struct nlattr *nest2 __maybe_unused;
1733+ u64 clockval __maybe_unused;
1734+ struct nlattr *nest;
1735+
1736+ nest = nla_nest_start(skb, BRIDGE_VLANDB_GLOBAL_OPTIONS);
1737+ if (!nest)
1738+ return false;
1739+
1740+ if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_ID, vid))
1741+ goto out_err;
1742+
1743+ if (vid_range && vid < vid_range &&
1744+ nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_RANGE, vid_range))
1745+ goto out_err;
1746+
1747+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1748+ clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_last_member_interval);
1749+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
1750+ clockval, BRIDGE_VLANDB_GOPTS_PAD))
1751+ goto out_err;
1752+ clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_membership_interval);
1753+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
1754+ clockval, BRIDGE_VLANDB_GOPTS_PAD))
1755+ goto out_err;
1756+ clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_querier_interval);
1757+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
1758+ clockval, BRIDGE_VLANDB_GOPTS_PAD))
1759+ goto out_err;
1760+ clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_interval);
1761+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
1762+ clockval, BRIDGE_VLANDB_GOPTS_PAD))
1763+ goto out_err;
1764+ clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_response_interval);
1765+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
1766+ clockval, BRIDGE_VLANDB_GOPTS_PAD))
1767+ goto out_err;
1768+ clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_startup_query_interval);
1769+ if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
1770+ clockval, BRIDGE_VLANDB_GOPTS_PAD))
1771+ goto out_err;
1772+
1773+#if IS_ENABLED(CONFIG_IPV6)
1774+ if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
1775+ v_opts->br_mcast_ctx.multicast_mld_version))
1776+ goto out_err;
1777+#endif
1778+#endif
1779+
1780+ nla_nest_end(skb, nest);
1781+
1782+ return true;
1783+
1784+out_err:
1785+ nla_nest_cancel(skb, nest);
1786+ return false;
1787+}
1788diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1789index dbc9b2f..706b207 100644
1790--- a/net/core/rtnetlink.c
1791+++ b/net/core/rtnetlink.c
1792@@ -1996,6 +1996,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1793 goto cont;
1794 if (idx < s_idx)
1795 goto cont;
1796+
1797 err = rtnl_fill_ifinfo(skb, dev, net,
1798 RTM_NEWLINK,
1799 NETLINK_CB(cb->skb).portid,
1800diff --git a/net/dsa/slave.c b/net/dsa/slave.c
1801index 2dfaa1e..a60a26c 100644
1802--- a/net/dsa/slave.c
1803+++ b/net/dsa/slave.c
1804@@ -1495,8 +1495,19 @@ int dsa_slave_create(struct dsa_port *port)
1805 goto out_phy;
1806 }
1807
1808+ rtnl_lock();
1809+
1810+ ret = netdev_upper_dev_link(master, slave_dev, NULL);
1811+
1812+ rtnl_unlock();
1813+
1814+ if (ret)
1815+ goto out_unregister;
1816+
1817 return 0;
1818
1819+out_unregister:
1820+ unregister_netdev(slave_dev);
1821 out_phy:
1822 rtnl_lock();
1823 phylink_disconnect_phy(p->dp->pl);