xref: /linux/net/bridge/br_mdb.c (revision d8d9ba8dc9c77358cd7ea73e4e44e8952c9baf35)
// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

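/* Helpers for dumping multicast router ports. Each returns true if the
 * port is currently linked on the per-family router list and reports the
 * remaining router timer via @timer. The IPv6 variant is a stub that
 * returns false when CONFIG_IPV6 is disabled.
 */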
static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}

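/* Fill the MDBA_ROUTER nest: one MDBA_ROUTER_PORT entry per bridge port
 * that currently acts as a multicast router for either IP family. For
 * per-vlan contexts (brmctx->vlan set) only ports that are members of
 * that vlan are considered. Called under rcu_read_lock from the dump
 * path for the RCU port list walk.
 */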
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

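/* Translate internal MDB_PG_FLAGS_* port group flags into the UAPI
 * br_mdb_entry state/flags exposed over netlink.
 */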
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

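/* Convert a UAPI br_mdb_entry (plus an optional MDBE_ATTR_SOURCE
 * attribute) into the bridge's internal br_ip group key. Entries whose
 * protocol is neither IPv4 nor IPv6 are treated as L2 (MAC) groups.
 */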
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

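/* Dump a port group's source list as an MDBA_MDB_EATTR_SRC_LIST nest of
 * MDBA_MDB_SRCLIST_ENTRY attributes, each carrying the source address
 * and its remaining timer. The list walk is safe under either RCU or the
 * bridge's multicast_lock, as the lockdep expression below documents.
 */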
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

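/* Serialize one MDB entry as an MDBA_MDB_ENTRY_INFO nest. A NULL @p
 * denotes the host-joined entry, reported against the bridge device
 * itself. Per-source data and the filter mode are only dumped when the
 * context runs IGMPv3/MLDv2, mirroring rtnl_mdb_nlmsg_size() below.
 */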
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	else
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

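/* Dump all MDB entries of one bridge into an MDBA_MDB nest. cb->args[1]
 * and cb->args[2] hold the entry and port-group resume indexes so a dump
 * that overflows the skb can continue where it left off.
 */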
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

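/* Strict-mode validation of an RTM_GETMDB dump request: the header must
 * be exactly a struct br_port_msg with no ifindex filter and no trailing
 * attributes.
 */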
static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

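/* RTM_GETMDB dump handler: walks all bridge devices in the namespace
 * under RCU and emits one NLM_F_MULTI message per bridge, containing the
 * MDB entries followed by the router port list. cb->args[0] is the
 * bridge device resume index.
 */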
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct net_bridge *br = netdev_priv(dev);
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, &br->multicast_ctx) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

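/* Build a complete RTM_NEWMDB/RTM_DELMDB notification for a single
 * entry: br_port_msg header, MDBA_MDB nest, MDBA_MDB_ENTRY nest, then
 * the entry itself via __mdb_fill_info().
 */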
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

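/* Upper bound on the notification size for one entry. Per-source
 * attributes are only accounted for when the active IGMP/MLD version can
 * produce them (IGMPv3/MLDv2), matching what __mdb_fill_info() dumps.
 */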
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

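/* Completion context for deferred switchdev offload of a port group:
 * once the driver acknowledges the add, br_mdb_complete() looks the
 * group up again under multicast_lock and sets MDB_PG_FLAGS_OFFLOAD on
 * the matching port group.
 */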
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

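/* Translate an MDB entry into the switchdev object representation: IP
 * groups are mapped to their multicast MAC address (ip_eth_mc_map /
 * ipv6_eth_mc_map), L2 groups are copied as-is.
 */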
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

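/* MDB replay support: br_mdb_replay() walks the existing database and
 * re-sends every relevant entry to a switchdev driver's notifier as if
 * it had just been added or deleted, presumably for drivers that attach
 * to (or detach from) a bridge port after entries already exist.
 * br_mdb_replay_one() delivers a single object; br_mdb_queue_one()
 * snapshots an entry onto a temporary list.
 */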
static int br_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			     const struct switchdev_obj_port_mdb *mdb,
			     unsigned long action, const void *ctx,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_mdb_queue_one(struct list_head *mdb_list,
			    enum switchdev_obj_id id,
			    const struct net_bridge_mdb_entry *mp,
			    struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}

int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
		  const void *ctx, bool adding, struct notifier_block *nb,
		  struct netlink_ext_ack *extack)
{
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_HOST_MDB,
					       mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_PORT_MDB,
					       mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj),
					action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	return err;
}

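/* Propagate a host-joined group to all switchdev-capable lower devices
 * of the bridge as SWITCHDEV_OBJ_ID_HOST_MDB objects, so that traffic
 * for host-joined groups keeps reaching the local stack.
 */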
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}

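/* Central MDB change notification: first program the change into
 * hardware via switchdev (deferred, with br_mdb_complete() flagging
 * successful offload), then broadcast an RTM_NEWMDB/RTM_DELMDB message
 * to the RTNLGRP_MDB netlink group.
 */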
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		br_switchdev_mdb_populate(&mdb, mp);

		mdb.obj.orig_dev = pg->key.port->dev;
		switch (type) {
		case RTM_NEWMDB:
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->key.port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

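/* Build a router port notification: br_port_msg header plus an
 * MDBA_ROUTER nest holding a single MDBA_ROUTER_PORT entry (and its
 * vlan id, if any).
 */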
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}

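/* Notify userspace that a port was added to or removed from the
 * multicast router list. A NULL @pmctx denotes the bridge itself
 * (ifindex 0); for per-vlan contexts the vlan id is included in the
 * message.
 */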
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

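/* Validate a user-supplied br_mdb_entry: the group must be a multicast
 * address (excluding IPv4 link-local multicast and the IPv6 all-nodes
 * address), or a multicast MAC for L2 entries with proto 0; the state
 * must be permanent or temporary and the vlan id must be in range.
 */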
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
			return false;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}

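/* Validate an MDBE_ATTR_SOURCE attribute against the entry's protocol:
 * the length must match the address family and the source itself must
 * not be a multicast address.
 */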
static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

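/* Common RTM_NEWMDB/RTM_DELMDB request parsing: resolve the bridge
 * device from the br_port_msg header, extract and validate the
 * MDBA_SET_ENTRY payload, and parse the optional MDBA_SET_ENTRY_ATTRS
 * nest into @mdb_attrs.
 */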
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}

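/* Pick the multicast context an entry applies to: the global bridge
 * context when vlan snooping is disabled, otherwise the per-vlan context
 * matching the entry's vlan id. Returns NULL (with an extack message) if
 * no usable context exists.
 */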
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

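/* Core add path, called with multicast_lock held: create the group if
 * needed, handle host joins (NULL @port), insert a new port group in the
 * port-ordered list, arm the membership timer for temporary entries, and
 * keep (*,G)/(S,G) replication state consistent.
 */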
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mcast *brmctx;
	struct br_ip group, star_group;
	unsigned long now = jiffies;
	unsigned char flags = 0;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		return -EINVAL;
	}

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
	/* if we are adding a new EXCLUDE port group (*,G) it needs to be also
	 * added to all S,G entries for proper replication, if we are adding
	 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
	 * added to it for proper replication
	 */
	if (br_multicast_should_handle_mode(brmctx, group.proto)) {
		switch (filter_mode) {
		case MCAST_EXCLUDE:
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			break;
		case MCAST_INCLUDE:
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
			if (star_mp)
				br_multicast_sg_add_exclude_ports(star_mp, p);
			break;
		}
	}

	return 0;
}

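/* __br_mdb_add() performs the group addition under multicast_lock;
 * br_mdb_add() is the RTM_NEWMDB handler that parses and validates the
 * request and resolves the target port. With vlan filtering enabled and
 * no vlan given, the entry is installed on every vlan configured on the
 * port.
 */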
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct net_bridge_port *p,
			struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
	spin_unlock_bh(&br->multicast_lock);

	return ret;
}

static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}

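/* RTM_DELMDB handling mirrors the add path: __br_mdb_del() removes one
 * entry under multicast_lock (handling host leave specially), and with
 * vlan filtering enabled but no vlan given, br_mdb_del() walks all
 * configured vlans.
 */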
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		if (p->key.port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}

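/* Register the AF_BRIDGE rtnetlink handlers for MDB get/add/del. From
 * userspace these are typically exercised via iproute2, e.g. (an
 * illustrative invocation; device names are examples):
 *
 *	bridge mdb add dev br0 port swp1 grp 239.1.1.1 permanent
 *	bridge mdb show dev br0
 */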
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}