From 0a78a8c88b4db2a49aad7544c085078d65dbd343 Mon Sep 17 00:00:00 2001
From: mtk22468 <Xuzhu.Wang@mediatek.com>
Date: Mon, 18 Sep 2023 10:50:36 +0800
Subject: [PATCH 01/22] ovs: add multicast-to-unicast support

Snoop IGMP and MLD membership reports to build a per-vport multicast
database (MDB). When do_output() emits a multicast data packet on a
vport, look the group up in that vport's MDB and send one unicast copy
per learned member, with the destination MAC rewritten to the member's
address. The MDB is allocated in ovs_vport_alloc() and freed when the
vport is deleted.
---
 net/openvswitch/actions.c  |  30 ++++
 net/openvswitch/datapath.c | 289 +++++++++++++++++++++++++++++++++++++
 net/openvswitch/datapath.h |  32 ++++
 net/openvswitch/vport.c    |   8 +
 net/openvswitch/vport.h    |  26 ++++
 5 files changed, 385 insertions(+)

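Note for reviewers (this text sits before the first hunk, so git am ignores it):
as a rough illustration of the multicast-to-unicast idea only, here is a minimal
user-space sketch. The types and names in it (struct group, mcast_to_ucast,
MAX_MEMBERS) are hypothetical simplifications, not the kernel structures this
patch adds; it just shows the per-group member list and the per-member frame
copy with a rewritten destination MAC, which mirrors what do_output() does
against the per-vport MDB.

/* mcast_ucast_sketch.c: illustrative only, not part of the applied patch. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define ETH_ALEN	6
#define MAX_MEMBERS	8

/* Hypothetical, simplified stand-in for the per-group state kept in the MDB. */
struct group {
	uint32_t ip4;				/* IPv4 group address */
	uint8_t members[MAX_MEMBERS][ETH_ALEN];	/* MACs learned from IGMP/MLD reports */
	int n_members;
};

/* Emit one unicast copy of a multicast frame per learned member. */
static void mcast_to_ucast(const struct group *g, const uint8_t *frame, size_t len)
{
	uint8_t copy[1600];
	int i;

	if (len > sizeof(copy))
		return;

	for (i = 0; i < g->n_members; i++) {
		memcpy(copy, frame, len);
		/* Bytes 0..5 of an Ethernet frame are the destination MAC. */
		memcpy(copy, g->members[i], ETH_ALEN);
		printf("unicast copy to %02x:%02x:%02x:%02x:%02x:%02x\n",
		       copy[0], copy[1], copy[2], copy[3], copy[4], copy[5]);
		/* A real port would transmit 'copy' here (ovs_vport_send in the patch). */
	}
}

int main(void)
{
	struct group g = {
		.ip4 = 0xe1010101,	/* 225.1.1.1 */
		.n_members = 2,
		.members = {
			{ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
			{ 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },
		},
	};
	/* A multicast destination MAC (01:00:5e:01:01:01 maps to 225.1.1.1). */
	uint8_t frame[64] = { 0x01, 0x00, 0x5e, 0x01, 0x01, 0x01 };

	mcast_to_ucast(&g, frame, sizeof(frame));
	return 0;
}

Compile it with any C compiler and run it to see the two unicast copies a
two-member group produces.
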
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 9e8a5c4..d5bf30d 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -919,6 +919,10 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
 		      struct sw_flow_key *key)
 {
 	struct vport *vport = ovs_vport_rcu(dp, out_port);
+	struct multicast_data_base *mdb;
+	struct multicast_table *table;
+	struct multicast_table_entry *entry;
+	struct sk_buff *skb_cpy;
 
 	if (likely(vport)) {
 		u16 mru = OVS_CB(skb)->mru;
@@ -933,6 +937,32 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
 
 		if (likely(!mru ||
 			   (skb->len <= mru + vport->dev->hard_header_len))) {
+			if (is_multicast_addr(skb) && !is_igmp_mld(skb)) {
+				mdb = vport->mdb;
+				spin_lock(&mdb->tbl_lock);
+				list_for_each_entry(table, &mdb->list_head, mdb_node) {
+					if ((key->eth.type == htons(ETH_P_IP) &&
+					     table->group_addr.u.ip4 == key->ipv4.addr.dst) ||
+					    (key->eth.type == htons(ETH_P_IPV6) &&
+					     ipv6_addr_equal(&table->group_addr.u.ip6, &key->ipv6.addr.dst))) {
+						list_for_each_entry(entry, &table->entry_list, entry_node) {
+							skb_cpy = skb_copy(skb, GFP_ATOMIC);
+							if (!skb_cpy) {
+								kfree_skb(skb);
+								pr_err("%s(): skb copy error\n", __func__);
+								spin_unlock(&mdb->tbl_lock);
+								return;
+							}
+							memcpy(skb_cpy->data, entry->eth_addr, ETH_ALEN);
+							ovs_vport_send(vport, skb_cpy, ovs_key_mac_proto(key));
+						}
+						spin_unlock(&mdb->tbl_lock);
+						kfree_skb(skb);
+						return;
+					}
+				}
+				spin_unlock(&mdb->tbl_lock);
+			}
 			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
 		} else if (mru <= vport->dev->mtu) {
 			struct net *net = read_pnet(&dp->net);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 4c537e7..40b4795 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -11,6 +11,9 @@
 #include <linux/if_vlan.h>
 #include <linux/in.h>
 #include <linux/ip.h>
+#include <linux/igmp.h>
+#include <net/mld.h>
+#include <linux/icmpv6.h>
 #include <linux/jhash.h>
 #include <linux/delay.h>
 #include <linux/time.h>
@@ -538,6 +541,270 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 	return err;
 }
 
+static int ovs_multicast_add_group(struct ip_addr *_group_addr,
+				   const u8 *entry_addr,
+				   struct vport *input_vport)
+{
+	struct multicast_data_base *mdb;
+	struct multicast_table *table;
+	struct multicast_table_entry *entry;
+	int err;
+
+	mdb = input_vport->mdb;
+	spin_lock(&mdb->tbl_lock);
+	list_for_each_entry(table, &mdb->list_head, mdb_node) {
+		if (!memcmp(&table->group_addr.u, &_group_addr->u, sizeof(struct ip_addr))) {
+			list_for_each_entry(entry, &table->entry_list, entry_node) {
+				if (ether_addr_equal(entry->eth_addr, entry_addr))
+					goto out;
+			}
+
+			entry = kzalloc(sizeof(struct multicast_table_entry), GFP_ATOMIC);
+			if (!entry) {
+				err = -ENOMEM;
+				goto err;
+			}
+
+			memcpy(entry->eth_addr, entry_addr, ETH_ALEN);
+			list_add(&entry->entry_node, &table->entry_list);
+			goto out;
+		}
+	}
+
+	table = kzalloc(sizeof(struct multicast_table), GFP_ATOMIC);
+	if (!table) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	INIT_LIST_HEAD(&table->entry_list);
+	entry = kzalloc(sizeof(struct multicast_table_entry), GFP_ATOMIC);
+	if (!entry) {
+		kfree(table);
+		err = -ENOMEM;
+		goto err;
+	}
+
+	memcpy(entry->eth_addr, entry_addr, ETH_ALEN);
+	list_add(&entry->entry_node, &table->entry_list);
+
+	table->group_addr.u = _group_addr->u;
+	list_add(&table->mdb_node, &mdb->list_head);
+
+out:
+	err = 0;
+err:
+	spin_unlock(&mdb->tbl_lock);
+	return err;
+}
+
+static int ovs_multicast_leave_group(struct ip_addr *_group_addr,
+				     const u8 *entry_addr,
+				     struct vport *input_vport)
+{
+	struct multicast_data_base *mdb;
+	struct multicast_table *table, *table_tmp;
+	struct multicast_table_entry *entry, *entry_tmp;
+	int err;
+
+	mdb = input_vport->mdb;
+	spin_lock(&mdb->tbl_lock);
+	list_for_each_entry_safe(table, table_tmp, &mdb->list_head, mdb_node) {
+		if (!memcmp(&table->group_addr.u, &_group_addr->u, sizeof(struct ip_addr))) {
+			list_for_each_entry_safe(entry, entry_tmp, &table->entry_list, entry_node) {
+				if (ether_addr_equal(entry->eth_addr, entry_addr)) {
+					list_del(&entry->entry_node);
+					kfree(entry);
+
+					if (list_empty(&table->entry_list)) {
+						list_del(&table->mdb_node);
+						kfree(table);
+					}
+
+					goto out;
+				}
+			}
+		}
+	}
+
+out:
+	err = 0;
+	spin_unlock(&mdb->tbl_lock);
+	return err;
+}
+
+static int ovs_multicast_ipv4_rcv(struct sk_buff *skb, struct vport *input_vport)
+{
+	struct ethhdr *eth_hdr;
+	const u8 *dl_src;
+	struct ip_addr group_addr = {0};
+	struct iphdr *ip_header;
+	struct igmphdr *igmp_header;
+	int i;
+	struct igmpv3_report *igmpv3_hdr;
+	u16 group_num;
+	struct igmpv3_grec *grec;
+	u8 group_type;
+	u8 aux_data_len;
+	u16 num_of_source;
+	int err;
+
+	err = ip_mc_check_igmp(skb);
+	if (err)
+		return 0;
+
+	eth_hdr = skb_eth_hdr(skb);
+	dl_src = eth_hdr->h_source;
+	ip_header = ip_hdr(skb);
+	igmp_header = igmp_hdr(skb);
+
+	switch (igmp_header->type) {
+	case IGMP_HOST_MEMBERSHIP_REPORT:
+	case IGMPV2_HOST_MEMBERSHIP_REPORT:
+		group_addr.u.ip4 = igmp_header->group;
+		if (ipv4_is_local_multicast(group_addr.u.ip4))
+			return 0;
+		ovs_multicast_add_group(&group_addr, dl_src, input_vport);
+		break;
+	case IGMP_HOST_LEAVE_MESSAGE:
+		group_addr.u.ip4 = igmp_header->group;
+		if (ipv4_is_local_multicast(group_addr.u.ip4))
+			return 0;
+		ovs_multicast_leave_group(&group_addr, dl_src, input_vport);
+		break;
+	case IGMPV3_HOST_MEMBERSHIP_REPORT:
+		igmpv3_hdr = (struct igmpv3_report *)igmp_header;
+		group_num = ntohs(igmpv3_hdr->ngrec);
+		grec = igmpv3_hdr->grec;
+
+		for (i = 0; i < group_num; i++) {
+			group_type = grec->grec_type;
+			aux_data_len = grec->grec_auxwords;
+			num_of_source = ntohs(grec->grec_nsrcs);
+			group_addr.u.ip4 = grec->grec_mca;
+			if (ipv4_is_local_multicast(group_addr.u.ip4))
+				return 0;
+
+			if (group_type == IGMPV3_MODE_IS_EXCLUDE ||
+			    group_type == IGMPV3_CHANGE_TO_EXCLUDE ||
+			    group_type == IGMPV3_ALLOW_NEW_SOURCES)
+				ovs_multicast_add_group(&group_addr, dl_src, input_vport);
+
+			if (group_type == IGMPV3_MODE_IS_INCLUDE ||
+			    group_type == IGMPV3_CHANGE_TO_INCLUDE ||
+			    group_type == IGMPV3_BLOCK_OLD_SOURCES)
+				if (num_of_source == 0)
+					ovs_multicast_leave_group(&group_addr, dl_src, input_vport);
+
235+ grec += (8 + (num_of_source * 4) + aux_data_len);
+		}
+		break;
+	case IGMP_HOST_MEMBERSHIP_QUERY:
+		break;
+	default:
+		pr_warning("%s(): error packet type 0x%x\n", __func__, igmp_header->type);
+		break;
+	}
+	return 0;
+}
+
+static int ovs_multicast_ipv6_rcv(struct sk_buff *skb, struct vport *input_vport)
+{
+	const u8 *dl_src;
+	struct mld_msg *mld_hdr;
+	struct ip_addr group_addr = {0};
+	struct icmp6hdr *icmpv6_hdr;
+	u16 group_num;
+	struct mld2_grec *grec;
+	u8 group_type;
+	u8 aux_data_len;
+	u16 num_of_source;
+	int i;
+	int err;
+
+	err = ipv6_mc_check_mld(skb);
+	if (err)
+		return err;
+
+	mld_hdr = (struct mld_msg *)skb_transport_header(skb);
+	dl_src = skb_eth_hdr(skb)->h_source;
+
+	switch (mld_hdr->mld_type) {
+	case ICMPV6_MGM_REPORT:
+		group_addr.u.ip6 = mld_hdr->mld_mca;
+		if (ipv6_addr_is_ll_all_nodes(&group_addr.u.ip6))
+			return 0;
+		ovs_multicast_add_group(&group_addr, dl_src, input_vport);
+		break;
+	case ICMPV6_MGM_REDUCTION:
+		group_addr.u.ip6 = mld_hdr->mld_mca;
+		if (ipv6_addr_is_ll_all_nodes(&group_addr.u.ip6))
+			return 0;
+		ovs_multicast_leave_group(&group_addr, dl_src, input_vport);
+		break;
+	case ICMPV6_MLD2_REPORT:
+		icmpv6_hdr = icmp6_hdr(skb);
+		group_num = ntohs(icmpv6_hdr->icmp6_dataun.un_data16[1]);
+		grec = (struct mld2_grec *)(skb_transport_header(skb) + sizeof(struct icmp6hdr));
+
+		for (i = 0; i < group_num; i++) {
+			group_type = grec->grec_type;
+			aux_data_len = grec->grec_auxwords;
+			num_of_source = ntohs(grec->grec_nsrcs);
+			group_addr.u.ip6 = grec->grec_mca;
+			if (ipv6_addr_is_ll_all_nodes(&group_addr.u.ip6))
+				return 0;
+
+			if ((group_type == MLD2_MODE_IS_EXCLUDE ||
+			     group_type == MLD2_CHANGE_TO_EXCLUDE ||
+			     group_type == MLD2_ALLOW_NEW_SOURCES) &&
+			    num_of_source == 0)
+				ovs_multicast_add_group(&group_addr, dl_src, input_vport);
+			else if ((group_type == MLD2_MODE_IS_INCLUDE ||
+				  group_type == MLD2_CHANGE_TO_INCLUDE ||
+				  group_type == MLD2_BLOCK_OLD_SOURCES) &&
+				 num_of_source == 0)
+				ovs_multicast_leave_group(&group_addr, dl_src, input_vport);
+			else {
+				pr_info("%s(): group_type(%d), aux_data_len(%d), num_of_source(%d), group_addr(%pI6)\n",
+					__func__, group_type,
+					aux_data_len, num_of_source,
+					&group_addr.u.ip6);
+				return 0;
+			}
+			grec = (struct mld2_grec *)((u8 *)grec + sizeof(struct mld2_grec)
+						    + aux_data_len * sizeof(u32));
+		}
+		break;
+	case ICMPV6_MGM_QUERY:
+		break;
+	default:
+		pr_warning("%s(): error packet type 0x%x\n", __func__, mld_hdr->mld_type);
+		break;
+	}
+
+	return 0;
+}
+
+static int ovs_multicast_rcv(struct sk_buff *skb, struct vport *input_vport)
+{
+	int ret = 0;
+
+	if (!skb)
+		return -EINVAL;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		ret = ovs_multicast_ipv4_rcv(skb, input_vport);
+		break;
+	case htons(ETH_P_IPV6):
+		ret = ovs_multicast_ipv6_rcv(skb, input_vport);
+		break;
+	}
+
+	return ret;
+}
+
 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 {
 	struct ovs_header *ovs_header = info->userhdr;
@@ -612,6 +879,9 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	OVS_CB(packet)->input_vport = input_vport;
 	sf_acts = rcu_dereference(flow->sf_acts);
 
+	if (is_multicast_addr(packet))
+		ovs_multicast_rcv(packet, input_vport);
+
 	local_bh_disable();
 	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
 	local_bh_enable();
@@ -2199,6 +2469,9 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	struct datapath *dp;
 	struct vport *vport;
 	unsigned int new_headroom;
+	struct multicast_data_base *mdb;
+	struct multicast_table *table, *table_tmp;
+	struct multicast_table_entry *entry, *entry_tmp;
 	int err;
 
 	reply = ovs_vport_cmd_alloc_info();
@@ -2226,6 +2499,22 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 	if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
 		update_headroom = true;
 
+	mdb = vport->mdb;
+	spin_lock(&mdb->tbl_lock);
+	list_for_each_entry_safe(table, table_tmp, &mdb->list_head, mdb_node) {
+		list_for_each_entry_safe(entry, entry_tmp, &table->entry_list, entry_node) {
+			list_del(&entry->entry_node);
+			kfree(entry);
+
+			if (list_empty(&table->entry_list)) {
+				list_del(&table->mdb_node);
+				kfree(table);
+			}
+		}
+	}
+	spin_unlock(&mdb->tbl_lock);
+	kfree(mdb);
+
 	netdev_reset_rx_headroom(vport->dev);
 	ovs_dp_detach_port(vport);
 
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 81e85dd..6830d3b 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -215,6 +215,38 @@ static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
 	return dp;
 }
 
+static inline bool is_multicast_addr(struct sk_buff *skb)
+{
+	struct ethhdr *eth_hdr;
+
+	if (!skb)
+		return 0;
+
+	eth_hdr = skb_eth_hdr(skb);
+
+	return (eth_hdr->h_dest[0] == 0x01 && skb->protocol == htons(ETH_P_IP)) ||
+	       (eth_hdr->h_dest[0] == 0x33 && skb->protocol == htons(ETH_P_IPV6));
+}
+
+static inline bool is_igmp_mld(struct sk_buff *skb)
+{
+	struct ethhdr *eth_hdr;
+	int err = 0;
+
+	if (!skb)
+		return err;
+
+	eth_hdr = skb_eth_hdr(skb);
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		err = ip_hdr(skb)->protocol == IPPROTO_IGMP;
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		err = !ipv6_mc_check_mld(skb);
+	}
+
+	return err;
+}
+
 extern struct notifier_block ovs_dp_device_notifier;
 extern struct genl_family dp_vport_genl_family;
 
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 19af0ef..77bc923 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -141,6 +141,14 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 		return ERR_PTR(-EINVAL);
 	}
 
+	vport->mdb = kzalloc(sizeof(struct multicast_data_base), GFP_KERNEL);
+	if (!vport->mdb) {
+		kfree(vport);
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&vport->mdb->list_head);
+	spin_lock_init(&vport->mdb->tbl_lock);
+
 	return vport;
 }
 EXPORT_SYMBOL_GPL(ovs_vport_alloc);
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 1eb7495..eb69d6c 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -55,6 +55,30 @@ struct vport_portids {
 	u32 ids[];
 };
 
+struct ip_addr {
+	union {
+		__be32 ip4;
+		struct in6_addr ip6;
+	} u;
+};
+
+struct multicast_table_entry {
+	struct list_head entry_node;
+	u8 eth_addr[ETH_ALEN];
+};
+
+struct multicast_table {
+	struct list_head mdb_node;
+	struct list_head entry_list;
+	struct ip_addr group_addr;
+};
+
+struct multicast_data_base {
+	struct list_head list_head;
+	spinlock_t tbl_lock;
+};
+
+
 /**
  * struct vport - one port within a datapath
  * @dev: Pointer to net_device.
@@ -79,6 +103,8 @@ struct vport {
 
 	struct list_head detach_list;
 	struct rcu_head rcu;
+
+	struct multicast_data_base *mdb;
 };
 
 /**
-- 
2.18.0
