/* xref: /linux/net/bridge/br_mdb.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0) */
// SPDX-License-Identifier: GPL-2.0
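/* Bridge multicast database (MDB) netlink interface: dumping of MDB entries
 * and multicast router ports, RTM_NEWMDB / RTM_DELMDB / RTM_GETMDB request
 * handling and notifications to user space and switchdev drivers.
 */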
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}

static size_t __br_rports_one_size(void)
{
	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
	       nla_total_size(sizeof(u32));  /* MDBA_ROUTER_PATTR_VID */
}

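/* Estimate the netlink size needed to dump all multicast router ports of
 * @brmctx; the router lists are walked under RCU.
 */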
size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();

#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}

int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

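/* Dump the source list of a port group as a nested MDBA_MDB_EATTR_SRC_LIST
 * attribute; runs under RCU or the bridge multicast_lock, matching the
 * lockdep annotation below.
 */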
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

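/* Fill one MDBA_MDB_ENTRY_INFO nest for @mp; @p selects a specific port
 * group, while @p == NULL emits the host-joined (bridge device) entry.
 */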
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP)) {
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (mp->addr.proto == htons(ETH_P_IPV6)) {
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	} else {
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
		e.state = MDB_PERMANENT;
	}
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

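/* cb->args[1] and cb->args[2] carry the MDB entry and port group index to
 * resume from, so an interrupted dump continues where the previous call
 * left off.
 */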
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

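/* RTM_GETMDB dump handler, e.g. triggered by "bridge mdb show dev br0"
 * (illustrative iproute2 invocation; exact syntax may vary by version).
 */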
int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
		struct netlink_callback *cb)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, RTM_GETMDB, sizeof(*bpm),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->ifindex = dev->ifindex;

	rcu_read_lock();

	err = br_mdb_fill_info(skb, cb, dev);
	if (err)
		goto out;
	err = br_rports_fill_info(skb, &br->multicast_ctx);
	if (err)
		goto out;

out:
	rcu_read_unlock();
	nlmsg_end(skb, nlh);
	return err;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

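/* Size estimate for one port group entry in an MDB notification; must stay
 * in sync with what __mdb_fill_info() and __mdb_fill_srcs() emit.
 */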
static size_t rtnl_mdb_nlmsg_pg_size(const struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	size_t nlmsg_size, addr_size = 0;

		     /* MDBA_MDB_ENTRY_INFO */
	nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
		     /* MDBA_MDB_EATTR_TIMER */
		     nla_total_size(sizeof(u32));

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

static size_t rtnl_mdb_nlmsg_size(const struct net_bridge_port_group *pg)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg)) +
	       /* MDBA_MDB */
	       nla_total_size(0) +
	       /* MDBA_MDB_ENTRY */
	       nla_total_size(0) +
	       /* Port group entry */
	       rtnl_mdb_nlmsg_pg_size(pg);
}

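/* Notify switchdev drivers and user space (RTNLGRP_MDB) about an MDB entry
 * change; GFP_ATOMIC is used since this may be called under multicast_lock.
 */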
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	br_switchdev_mdb_notify(dev, mp, pg, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}

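/* Notify user space that a port was added to or removed from the multicast
 * router port list, optionally scoped to a per-VLAN context.
 */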
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static const struct nla_policy
br_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
	[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
						  sizeof(struct in_addr),
						  sizeof(struct in6_addr)),
};

static const struct nla_policy
br_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
	[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(br_mdbe_src_list_entry_pol),
};

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
	[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
						  MCAST_INCLUDE),
	[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(br_mdbe_src_list_pol),
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

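/* Select the multicast context a new entry belongs to: the global bridge
 * context, or the per-VLAN context when VLAN snooping is enabled (in which
 * case the entry must carry a VID).
 */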
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags)
{
	unsigned long now = jiffies;

	pg->flags = flags;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	return 0;
}

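/* Join a port to an (S, G) entry; the entry's port list is ordered by port
 * pointer, and NLM_F_REPLACE updates an existing join in place.
 */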
static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_mcast *brmctx,
			       unsigned char flags,
			       struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(S, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
						       flags);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					MCAST_INCLUDE, cfg->rt_protocol, extack);
	if (unlikely(!p))
		return -ENOMEM;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);

	/* All of (*, G) EXCLUDE ports need to be added to the new (S, G) for
	 * proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto)) {
		struct net_bridge_mdb_entry *star_mp;
		struct br_ip star_group;

		star_group = p->key.addr;
		memset(&star_group.src, 0, sizeof(star_group.src));
		star_mp = br_mdb_ip_get(cfg->br, &star_group);
		if (star_mp)
			br_multicast_sg_add_exclude_ports(star_mp, p);
	}

	return 0;
}

static int br_mdb_add_group_src_fwd(const struct br_mdb_config *cfg,
				    struct br_ip *src_ip,
				    struct net_bridge_mcast *brmctx,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *sgmp;
	struct br_mdb_config sg_cfg;
	struct br_ip sg_ip;
	u8 flags = 0;

	sg_ip = cfg->group;
	sg_ip.src = src_ip->src;
	sgmp = br_multicast_new_group(cfg->br, &sg_ip);
	if (IS_ERR(sgmp)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to add (S, G) MDB entry");
		return PTR_ERR(sgmp);
	}

	if (cfg->entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;
	if (cfg->filter_mode == MCAST_EXCLUDE)
		flags |= MDB_PG_FLAGS_BLOCKED;

	memset(&sg_cfg, 0, sizeof(sg_cfg));
	sg_cfg.br = cfg->br;
	sg_cfg.p = cfg->p;
	sg_cfg.entry = cfg->entry;
	sg_cfg.group = sg_ip;
	sg_cfg.src_entry = true;
	sg_cfg.filter_mode = MCAST_INCLUDE;
	sg_cfg.rt_protocol = cfg->rt_protocol;
	sg_cfg.nlflags = cfg->nlflags;
	return br_mdb_add_group_sg(&sg_cfg, sgmp, brmctx, flags, extack);
}

static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
				struct net_bridge_port_group *pg,
				struct net_bridge_mcast *brmctx,
				struct br_mdb_src_entry *src,
				struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	int err;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (!ent) {
		ent = br_multicast_new_group_src(pg, &src->addr);
		if (!ent) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add new source entry");
			return -ENOSPC;
		}
	} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
		return -EEXIST;
	}

	if (cfg->filter_mode == MCAST_INCLUDE &&
	    cfg->entry->state == MDB_TEMPORARY)
		mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
	else
		del_timer(&ent->timer);

	/* Install a (S, G) forwarding entry for the source. */
	err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
	if (err)
		goto err_del_sg;

	ent->flags = BR_SGRP_F_INSTALLED | BR_SGRP_F_USER_ADDED;

	return 0;

err_del_sg:
	__br_multicast_del_group_src(ent);
	return err;
}

static void br_mdb_del_group_src(struct net_bridge_port_group *pg,
				 struct br_mdb_src_entry *src)
{
	struct net_bridge_group_src *ent;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (WARN_ON_ONCE(!ent))
		return;
	br_multicast_del_group_src(ent, false);
}

static int br_mdb_add_group_srcs(const struct br_mdb_config *cfg,
				 struct net_bridge_port_group *pg,
				 struct net_bridge_mcast *brmctx,
				 struct netlink_ext_ack *extack)
{
	int i, err;

	for (i = 0; i < cfg->num_src_entries; i++) {
		err = br_mdb_add_group_src(cfg, pg, brmctx,
					   &cfg->src_entries[i], extack);
		if (err)
			goto err_del_group_srcs;
	}

	return 0;

err_del_group_srcs:
	for (i--; i >= 0; i--)
		br_mdb_del_group_src(pg, &cfg->src_entries[i]);
	return err;
}

static int br_mdb_replace_group_srcs(const struct br_mdb_config *cfg,
				     struct net_bridge_port_group *pg,
				     struct net_bridge_mcast *brmctx,
				     struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int err;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	err = br_mdb_add_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		goto err_clear_delete;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_DELETE)
			br_multicast_del_group_src(ent, false);
	}

	return 0;

err_clear_delete:
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_DELETE;
	return err;
}

static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
				       struct net_bridge_mdb_entry *mp,
				       struct net_bridge_port_group *pg,
				       struct net_bridge_mcast *brmctx,
				       unsigned char flags,
				       struct netlink_ext_ack *extack)
{
	unsigned long now = jiffies;
	int err;

	err = br_mdb_replace_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		return err;

	pg->flags = flags;
	pg->filter_mode = cfg->filter_mode;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto))
		br_multicast_star_g_handle_mode(pg, cfg->filter_mode);

	return 0;
}

static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags,
				   struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;
	int err;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(*, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
							   flags, extack);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					cfg->filter_mode, cfg->rt_protocol,
					extack);
	if (unlikely(!p))
		return -ENOMEM;

	err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
	if (err)
		goto err_del_port_group;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);
	/* If we are adding a new EXCLUDE port group (*, G), it needs to be
	 * also added to all (S, G) entries for proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);

	return 0;

err_del_port_group:
	br_multicast_del_port_group(p);
	return err;
}

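/* Add a validated request under multicast_lock: creates the MDB entry if
 * needed and dispatches to host join, (*, G) or (S, G) handling.
 */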
static int br_mdb_add_group(const struct br_mdb_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge_port *port = cfg->p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mcast *brmctx;
	struct br_ip group = cfg->group;
	unsigned char flags = 0;

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	mp = br_multicast_new_group(br, &group);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	if (br_multicast_is_star_g(&group))
		return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
	else
		return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
}

static int __br_mdb_add(const struct br_mdb_config *cfg,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&cfg->br->multicast_lock);
	ret = br_mdb_add_group(cfg, extack);
	spin_unlock_bh(&cfg->br->multicast_lock);

	return ret;
}

static int br_mdb_config_src_entry_init(struct nlattr *src_entry,
					struct br_mdb_src_entry *src,
					__be16 proto,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
			       br_mdbe_src_list_entry_pol, extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
		return -EINVAL;

	if (!is_valid_mdb_source(tb[MDBE_SRCATTR_ADDRESS], proto, extack))
		return -EINVAL;

	src->addr.proto = proto;
	nla_memcpy(&src->addr.src, tb[MDBE_SRCATTR_ADDRESS],
		   nla_len(tb[MDBE_SRCATTR_ADDRESS]));

	return 0;
}

static int br_mdb_config_src_list_init(struct nlattr *src_list,
				       struct br_mdb_config *cfg,
				       struct netlink_ext_ack *extack)
{
	struct nlattr *src_entry;
	int rem, err;
	int i = 0;

	nla_for_each_nested(src_entry, src_list, rem)
		cfg->num_src_entries++;

	if (cfg->num_src_entries >= PG_SRC_ENT_LIMIT) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Exceeded maximum number of source entries (%u)",
				       PG_SRC_ENT_LIMIT - 1);
		return -EINVAL;
	}

	cfg->src_entries = kcalloc(cfg->num_src_entries,
				   sizeof(struct br_mdb_src_entry), GFP_KERNEL);
	if (!cfg->src_entries)
		return -ENOMEM;

	nla_for_each_nested(src_entry, src_list, rem) {
		err = br_mdb_config_src_entry_init(src_entry,
						   &cfg->src_entries[i],
						   cfg->entry->addr.proto,
						   extack);
		if (err)
			goto err_src_entry_init;
		i++;
	}

	return 0;

err_src_entry_init:
	kfree(cfg->src_entries);
	return err;
}

static void br_mdb_config_src_list_fini(struct br_mdb_config *cfg)
{
	kfree(cfg->src_entries);
}

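/* Parse and validate the MDBE_ATTR_* attributes of a request, deriving the
 * group address, filter mode, source list and routing protocol.
 */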
static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
				    struct br_mdb_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX, set_attrs,
			       br_mdbe_attrs_pol, extack);
	if (err)
		return err;

	if (mdb_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
				 cfg->entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(cfg->entry, &cfg->group, mdb_attrs);

	if (mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
			return -EINVAL;
		}
		cfg->filter_mode = nla_get_u8(mdb_attrs[MDBE_ATTR_GROUP_MODE]);
	} else {
		cfg->filter_mode = MCAST_EXCLUDE;
	}

	if (mdb_attrs[MDBE_ATTR_SRC_LIST]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
			return -EINVAL;
		}
		if (!mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
			return -EINVAL;
		}
		err = br_mdb_config_src_list_init(mdb_attrs[MDBE_ATTR_SRC_LIST],
						  cfg, extack);
		if (err)
			return err;
	}

	if (!cfg->num_src_entries && cfg->filter_mode == MCAST_INCLUDE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
		return -EINVAL;
	}

	if (mdb_attrs[MDBE_ATTR_RTPROT]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be set for host groups");
			return -EINVAL;
		}
		cfg->rt_protocol = nla_get_u8(mdb_attrs[MDBE_ATTR_RTPROT]);
	}

	return 0;
}

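/* Common validation for MDB requests: resolves the bridge and the optional
 * bridge port from MDBA_SET_ENTRY and fills the configuration defaults.
 */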
static int br_mdb_config_init(struct br_mdb_config *cfg, struct net_device *dev,
			      struct nlattr *tb[], u16 nlmsg_flags,
			      struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);

	memset(cfg, 0, sizeof(*cfg));
	cfg->filter_mode = MCAST_EXCLUDE;
	cfg->rt_protocol = RTPROT_STATIC;
	cfg->nlflags = nlmsg_flags;

	cfg->br = netdev_priv(dev);

	if (!netif_running(cfg->br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(cfg->br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);

	if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
		struct net_device *pdev;

		pdev = __dev_get_by_index(net, cfg->entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		cfg->p = br_port_get_rtnl(pdev);
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (cfg->p->br != cfg->br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	if (cfg->entry->addr.proto == htons(ETH_P_IP) &&
	    ipv4_is_zeronet(cfg->entry->addr.u.ip4)) {
		NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address 0.0.0.0 is not allowed");
		return -EINVAL;
	}

	if (tb[MDBA_SET_ENTRY_ATTRS])
		return br_mdb_config_attrs_init(tb[MDBA_SET_ENTRY_ATTRS], cfg,
						extack);
	else
		__mdb_entry_to_br_ip(cfg->entry, &cfg->group, NULL);

	return 0;
}

static void br_mdb_config_fini(struct br_mdb_config *cfg)
{
	br_mdb_config_src_list_fini(cfg);
}

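/* Example user space triggers for this handler (illustrative iproute2
 * commands with placeholder device names; exact syntax may vary by
 * iproute2 version):
 *
 *   bridge mdb add dev br0 port swp1 grp 239.1.1.1 permanent vid 10
 *   bridge mdb add dev br0 port swp1 grp 239.1.1.1 filter_mode include \
 *           source_list 192.0.2.1,192.0.2.2
 */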
int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack);
	if (err)
		return err;

	err = -EINVAL;
	/* host join errors which can happen before creating the group */
	if (!cfg.p && !br_group_is_l2(&cfg.group)) {
		/* don't allow any flags for host-joined IP groups */
		if (cfg.entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			goto out;
		}
		if (!br_multicast_is_star_g(&cfg.group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			goto out;
		}
	}

	if (br_group_is_l2(&cfg.group) && cfg.entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		goto out;
	}

	if (cfg.p) {
		if (cfg.p->state == BR_STATE_DISABLED && cfg.entry->state != MDB_PERMANENT) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
			goto out;
		}
		vg = nbp_vlan_group(cfg.p);
	} else {
		vg = br_vlan_group(cfg.br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_add(&cfg, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(&cfg, extack);
	}

out:
	br_mdb_config_fini(&cfg);
	return err;
}

static int __br_mdb_del(const struct br_mdb_config *cfg)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip = cfg->group;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, 0, extack);
	if (err)
		return err;

	if (cfg.p)
		vg = nbp_vlan_group(cfg.p);
	else
		vg = br_vlan_group(cfg.br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_del(&cfg);
		}
	} else {
		err = __br_mdb_del(&cfg);
	}

	br_mdb_config_fini(&cfg);
	return err;
}

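/* Filter describing which MDB entries a bulk delete should flush; unset
 * (zero) fields act as wildcards, and state is only matched when state_mask
 * is set.
 */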
struct br_mdb_flush_desc {
	u32 port_ifindex;
	u16 vid;
	u8 rt_protocol;
	u8 state;
	u8 state_mask;
};

static const struct nla_policy br_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
	[MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
};

static int br_mdb_flush_desc_init(struct br_mdb_flush_desc *desc,
				  struct nlattr *tb[],
				  struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	desc->port_ifindex = entry->ifindex;
	desc->vid = entry->vid;
	desc->state = entry->state;

	if (!tb[MDBA_SET_ENTRY_ATTRS])
		return 0;

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_SET_ENTRY_ATTRS],
			       br_mdbe_attrs_del_bulk_pol, extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_STATE_MASK])
		desc->state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);

	if (mdbe_attrs[MDBE_ATTR_RTPROT])
		desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);

	return 0;
}

static void br_mdb_flush_host(struct net_bridge *br,
			      struct net_bridge_mdb_entry *mp,
			      const struct br_mdb_flush_desc *desc)
{
	u8 state;

	if (desc->port_ifindex && desc->port_ifindex != br->dev->ifindex)
		return;

	if (desc->rt_protocol)
		return;

	state = br_group_is_l2(&mp->addr) ? MDB_PERMANENT : 0;
	if (desc->state_mask && (state & desc->state_mask) != desc->state)
		return;

	br_multicast_host_leave(mp, true);
	if (!mp->ports && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

static void br_mdb_flush_pgs(struct net_bridge *br,
			     struct net_bridge_mdb_entry *mp,
			     const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;) {
		u8 state;

		if (desc->port_ifindex &&
		    desc->port_ifindex != p->key.port->dev->ifindex) {
			pp = &p->next;
			continue;
		}

		if (desc->rt_protocol && desc->rt_protocol != p->rt_protocol) {
			pp = &p->next;
			continue;
		}

		state = p->flags & MDB_PG_FLAGS_PERMANENT ? MDB_PERMANENT : 0;
		if (desc->state_mask &&
		    (state & desc->state_mask) != desc->state) {
			pp = &p->next;
			continue;
		}

		br_multicast_del_pg(mp, p, pp);
	}
}

static void br_mdb_flush(struct net_bridge *br,
			 const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_mdb_entry *mp;

	spin_lock_bh(&br->multicast_lock);

	/* Safe variant is not needed because entries are removed from the list
	 * upon group timer expiration or bridge deletion.
	 */
	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
		if (desc->vid && desc->vid != mp->addr.vid)
			continue;

		br_mdb_flush_host(br, mp, desc);
		br_mdb_flush_pgs(br, mp, desc);
	}

	spin_unlock_bh(&br->multicast_lock);
}

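/* Bulk delete (RTM_DELMDB with NLM_F_BULK) handler, e.g. reachable via
 * "bridge mdb flush dev br0" (illustrative; requires an iproute2 version
 * with mdb flush support).
 */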
int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
		    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_mdb_flush_desc desc = {};
	int err;

	err = br_mdb_flush_desc_init(&desc, tb, extack);
	if (err)
		return err;

	br_mdb_flush(br, &desc);

	return 0;
}

static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

static int br_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
			    struct br_ip *group, struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
		__mdb_entry_to_br_ip(entry, group, NULL);
		return 0;
	}

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_GET_ENTRY_ATTRS], br_mdbe_attrs_get_pol,
			       extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdbe_attrs[MDBE_ATTR_SOURCE],
				 entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, group, mdbe_attrs);

	return 0;
}

static struct sk_buff *
br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp)
{
	struct net_bridge_port_group *pg;
	size_t nlmsg_size;

	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
		     /* MDBA_MDB */
		     nla_total_size(0) +
		     /* MDBA_MDB_ENTRY */
		     nla_total_size(0);

	if (mp->host_joined)
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(NULL);

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br))
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(pg);

	return nlmsg_new(nlmsg_size, GFP_ATOMIC);
}

static int br_mdb_get_reply_fill(struct sk_buff *skb,
				 struct net_bridge_mdb_entry *mp, u32 portid,
				 u32 seq)
{
	struct nlattr *mdb_nest, *mdb_entry_nest;
	struct net_bridge_port_group *pg;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = mp->br->dev->ifindex;
	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (!mdb_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}
	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (!mdb_entry_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}

	if (mp->host_joined) {
		err = __mdb_fill_info(skb, mp, NULL);
		if (err)
			goto cancel;
	}

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br)) {
		err = __mdb_fill_info(skb, mp, pg);
		if (err)
			goto cancel;
	}

	nla_nest_end(skb, mdb_entry_nest);
	nla_nest_end(skb, mdb_nest);
	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}

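/* RTM_GETMDB request for a single entry, e.g. "bridge mdb get dev br0
 * grp 239.1.1.1" (illustrative; requires an iproute2 version with mdb get
 * support).
 */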
int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct sk_buff *skb;
	struct br_ip group;
	int err;

	err = br_mdb_get_parse(dev, tb, &group, extack);
	if (err)
		return err;

	/* Hold the multicast lock to ensure that the MDB entry does not change
	 * between the time the reply size is determined and when the reply is
	 * filled in.
	 */
	spin_lock_bh(&br->multicast_lock);

	mp = br_mdb_ip_get(br, &group);
	if (!mp || (!mp->ports && !mp->host_joined)) {
		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
		err = -ENOENT;
		goto unlock;
	}

	skb = br_mdb_get_reply_alloc(mp);
	if (!skb) {
		err = -ENOMEM;
		goto unlock;
	}

	err = br_mdb_get_reply_fill(skb, mp, portid, seq);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
		goto free;
	}

	spin_unlock_bh(&br->multicast_lock);

	return rtnl_unicast(skb, dev_net(dev), portid);

free:
	kfree_skb(skb);
unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
1705