// SPDX-License-Identifier: GPL-2.0
/* xref: /linux/net/bridge/br_mdb.c (revision de6e0b198239857943db395377dc1d2ddd6c05df) */
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

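/* Fill the MDBA_ROUTER nested attribute with the bridge's current multicast
 * router ports: one MDBA_ROUTER_PORT per entry on br->router_list, carrying
 * the port's ifindex, remaining router timer and configured router type.
 */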
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

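/* Translate the internal MDB_PG_FLAGS_* port group flags into the
 * MDB_FLAGS_* values exposed to userspace in struct br_mdb_entry.
 */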
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

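/* Convert a userspace br_mdb_entry (plus the optional MDBE_ATTR_SOURCE
 * attribute) into the bridge's internal br_ip group key: IPv4, IPv6 or,
 * for any other protocol, an L2 multicast MAC address.
 */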
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

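/* Dump a port group's source list as an MDBA_MDB_EATTR_SRC_LIST nest; each
 * MDBA_MDB_SRCLIST_ENTRY carries the source address and its remaining timer.
 * Runs under RCU or with the bridge's multicast_lock held.
 */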
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

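/* Fill one MDBA_MDB_ENTRY_INFO nest, either for a port group (p != NULL) or
 * for the host-joined state of the entry (p == NULL): the br_mdb_entry
 * itself, the group timer and, when IGMPv3/MLDv2 is in use, the source list
 * and filter mode.
 */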
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	else
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

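/* Dump all MDB entries of one bridge into an RTM_GETMDB reply. The entry and
 * port group indices are kept in cb->args[1] and cb->args[2] so a dump that
 * fills the skb can resume from the same position on the next callback.
 */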
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

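/* Strict-check an RTM_GETMDB dump request: the header must be exactly a
 * struct br_port_msg with no device filter and no trailing attributes.
 */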
static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

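/* RTM_GETMDB dump handler: for every bridge device in the namespace, emit a
 * br_port_msg followed by its MDB entries and router ports, resuming from
 * cb->args[0] on continuation.
 */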
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

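/* Build a single RTM_NEWMDB/RTM_DELMDB notification message for one
 * (mdb entry, port group) pair.
 */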
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

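/* Compute the skb allocation size for an MDB notification, accounting for
 * the optional source address, group mode and per-source attributes
 * depending on the group's protocol and the IGMP/MLD version in use.
 */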
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

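/* Context handed to the switchdev completion callback so the offloaded
 * port group can be looked up again once the deferred object add has run.
 */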
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

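/* Switchdev completion callback: if the deferred MDB object add succeeded,
 * mark the matching port group(s) with MDB_PG_FLAGS_OFFLOAD.
 */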
static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

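/* Propagate a host-joined group to one switchdev lower device as a
 * SWITCHDEV_OBJ_ID_HOST_MDB object: added on RTM_NEWMDB, deleted on
 * RTM_DELMDB.
 */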
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};

	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

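/* Replicate a host-joined group change to every lower device of the bridge. */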
static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}

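/* Notify both userspace (RTNLGRP_MDB) and offloading hardware about an MDB
 * change. Port groups are pushed via switchdev with a completion callback
 * that sets the offload flag; host-joined groups go through
 * br_mdb_switchdev_host() instead.
 */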
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		if (mp->addr.proto == htons(ETH_P_IP))
			ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
		else if (mp->addr.proto == htons(ETH_P_IPV6))
			ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif
		else
			ether_addr_copy(mdb.addr, mp->addr.dst.mac_addr);

		mdb.obj.orig_dev = pg->key.port->dev;
		switch (type) {
		case RTM_NEWMDB:
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->key.port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

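/* Build an RTM_NEWMDB/RTM_DELMDB router notification: a br_port_msg plus an
 * MDBA_ROUTER nest carrying the router port's ifindex.
 */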
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

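/* Notify userspace that a port was added to or removed from the multicast
 * router list; the ifindex is 0 when no port is given.
 */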
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

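/* Validate a userspace br_mdb_entry: reject reserved group addresses
 * (IPv4 local multicast, IPv6 link-local all-nodes), require a multicast
 * MAC for L2 (proto 0) entries, a known state and an in-range VLAN id.
 */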
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
			return false;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}

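/* Validate an MDBE_ATTR_SOURCE attribute: its length must match the group
 * protocol and the source itself must not be a multicast address.
 */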
static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

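/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request: resolve the bridge
 * device from the br_port_msg, extract the mandatory MDBA_SET_ENTRY and
 * parse the optional MDBA_SET_ENTRY_ATTRS nest into mdb_attrs.
 */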
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}

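/* Add a static group membership, either host-joined (port == NULL) or on a
 * bridge port. Called with the bridge's multicast_lock held. Newly added
 * (*,G) EXCLUDE and (S,G) INCLUDE port groups are cross-linked with the
 * corresponding (S,G)/(*,G) entries so traffic is replicated correctly.
 */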
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip group, star_group;
	unsigned long now = jiffies;
	unsigned char flags = 0;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		return -EINVAL;
	}

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
	/* If we are adding a new EXCLUDE port group (*,G), it also needs to be
	 * added to all (S,G) entries for proper replication; if we are adding
	 * a new INCLUDE port group (S,G), then all (*,G) EXCLUDE ports need
	 * to be added to it for proper replication.
	 */
	if (br_multicast_should_handle_mode(br, group.proto)) {
		switch (filter_mode) {
		case MCAST_EXCLUDE:
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			break;
		case MCAST_INCLUDE:
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
			if (star_mp)
				br_multicast_sg_add_exclude_ports(star_mp, p);
			break;
		}
	}

	return 0;
}

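/* Take the multicast lock and add a single group entry. */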
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct net_bridge_port *p,
			struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
	spin_unlock_bh(&br->multicast_lock);

	return ret;
}

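/* RTM_NEWMDB handler. When VLAN filtering is enabled and no VID is given,
 * the entry is installed on every VLAN configured on the port. A typical
 * userspace trigger is iproute2, e.g.:
 *
 *	bridge mdb add dev br0 port swp1 grp 239.1.1.1 permanent vid 10
 *
 * (device and port names above are illustrative only)
 */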
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified,
	 * install the mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}

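/* Delete one group entry under the multicast lock: either the host-joined
 * state or the port group whose port matches entry->ifindex.
 */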
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		if (p->key.port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

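/* RTM_DELMDB handler; mirrors br_mdb_add(), including the expansion to all
 * configured VLANs when no VID is specified.
 */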
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified,
	 * delete the mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}

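/* Register/unregister the PF_BRIDGE rtnetlink handlers for RTM_GETMDB,
 * RTM_NEWMDB and RTM_DELMDB.
 */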
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}