xref: /linux/net/bridge/br_if.c (revision c358f53871605a1a8d7ed6e544a05ea00e9c80cb)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Userspace interface
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <net/net_namespace.h>

#include "br_private.h"

/*
 * Determine the initial path cost based on speed, using the
 * recommendations from the 802.1D standard.
 *
 * Since the driver might sleep, we must not be holding any locks.
 */
static int port_cost(struct net_device *dev)
{
	struct ethtool_link_ksettings ecmd;

	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
		switch (ecmd.base.speed) {
		case SPEED_10000:
			return 2;
		case SPEED_5000:
			return 3;
		case SPEED_2500:
			return 4;
		case SPEED_1000:
			return 5;
		case SPEED_100:
			return 19;
		case SPEED_10:
			return 100;
		case SPEED_UNKNOWN:
			return 100;
		default:
			if (ecmd.base.speed > SPEED_10000)
				return 1;
		}
	}

	/* Old silly heuristics based on name */
	if (!strncmp(dev->name, "lec", 3))
		return 7;

	if (!strncmp(dev->name, "plip", 4))
		return 2500;

	return 100;	/* assume old 10Mbps */
}


/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	*notified = false;
	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED) {
			br_stp_enable_port(p);
			*notified = true;
		}
	} else {
		if (p->state != BR_STATE_DISABLED) {
			br_stp_disable_port(p);
			*notified = true;
		}
	}
	spin_unlock_bh(&br->lock);
}

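/* Put the port into promiscuous mode: bump the device's promiscuity count,
 * drop the statically synced fdb entries and mark the port with BR_PROMISC.
 * Counterpart of br_port_clear_promisc() below.
 */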
static void br_port_set_promisc(struct net_bridge_port *p)
{
	int err = 0;

	if (br_promisc_port(p))
		return;

	err = dev_set_promiscuity(p->dev, 1);
	if (err)
		return;

	br_fdb_unsync_static(p->br, p);
	p->flags |= BR_PROMISC;
}

static void br_port_clear_promisc(struct net_bridge_port *p)
{
	int err;

	/* Check if the port is already non-promisc or if it doesn't
	 * support UNICAST filtering.  Without unicast filtering support
	 * we'll end up re-enabling promisc mode anyway, so just check for
	 * it here.
	 */
	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
		return;

	/* Since we'll be clearing the promisc mode, program the port
	 * first so that we don't have interruption in traffic.
	 */
	err = br_fdb_sync_static(p->br, p);
	if (err)
		return;

	dev_set_promiscuity(p->dev, -1);
	p->flags &= ~BR_PROMISC;
}

/* When a port is added or removed or when certain port flags
 * change, this function is called to automatically manage the
 * promiscuity setting of all the bridge ports.  We are always called
 * under RTNL, so we can skip using rcu primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool set_all = false;

	/* If vlan filtering is disabled or bridge interface is placed
	 * into promiscuous mode, place all ports in promiscuous mode.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
		set_all = true;

	list_for_each_entry(p, &br->port_list, list) {
		if (set_all) {
			br_port_set_promisc(p);
		} else {
			/* If the number of auto-ports is <= 1, then all other
			 * ports will have their output configuration
			 * statically specified through fdbs.  Since ingress
			 * on the auto-port becomes forwarding/egress to other
			 * ports and egress configuration is statically known,
			 * we can say that ingress configuration of the
			 * auto-port is also statically known.
			 * This lets us disable promiscuous mode and write
			 * this config to hw.
			 */
			if (br->auto_cnt == 0 ||
			    (br->auto_cnt == 1 && br_auto_port(p)))
				br_port_clear_promisc(p);
			else
				br_port_set_promisc(p);
		}
	}
}

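/* Point @p's backup port at the bridge port behind @backup_dev, or clear it
 * when @backup_dev is NULL.  Both ports must belong to the same bridge, and
 * the target's backup_redirected_cnt is kept in sync.  Called under RTNL.
 */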
int nbp_backup_change(struct net_bridge_port *p,
		      struct net_device *backup_dev)
{
	struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
	struct net_bridge_port *backup_p = NULL;

	ASSERT_RTNL();

	if (backup_dev) {
		if (!netif_is_bridge_port(backup_dev))
			return -ENOENT;

		backup_p = br_port_get_rtnl(backup_dev);
		if (backup_p->br != p->br)
			return -EINVAL;
	}

	if (p == backup_p)
		return -EINVAL;

	if (old_backup == backup_p)
		return 0;

	/* if the backup link is already set, clear it */
	if (old_backup)
		old_backup->backup_redirected_cnt--;

	if (backup_p)
		backup_p->backup_redirected_cnt++;
	rcu_assign_pointer(p->backup_port, backup_p);

	return 0;
}

static void nbp_backup_clear(struct net_bridge_port *p)
{
	nbp_backup_change(p, NULL);
	if (p->backup_redirected_cnt) {
		struct net_bridge_port *cur_p;

		list_for_each_entry(cur_p, &p->br->port_list, list) {
			struct net_bridge_port *backup_p;

			backup_p = rtnl_dereference(cur_p->backup_port);
			if (backup_p == p)
				nbp_backup_change(cur_p, NULL);
		}
	}

	WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
}

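/* Recount the bridge's automatic ports (br_auto_port()) and, if the count
 * changed, re-evaluate promiscuity on all ports via br_manage_promisc().
 */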
static void nbp_update_port_count(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u32 cnt = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (br_auto_port(p))
			cnt++;
	}
	if (br->auto_cnt != cnt) {
		br->auto_cnt = cnt;
		br_manage_promisc(br);
	}
}

static void nbp_delete_promisc(struct net_bridge_port *p)
{
	/* If port is currently promiscuous, unset promiscuity.
	 * Otherwise, it is a static port so remove all addresses
	 * from it.
	 */
	dev_set_allmulti(p->dev, -1);
	if (br_promisc_port(p))
		dev_set_promiscuity(p->dev, -1);
	else
		br_fdb_unsync_static(p->br, p);
}

static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p
		= container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

static void brport_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
{
	struct net_bridge_port *p = kobj_to_brport(kobj);

	net_ns_get_ownership(dev_net(p->dev), uid, gid);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
	.get_ownership = brport_get_ownership,
};

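/* Final teardown of a port: drop the tracked reference on the underlying
 * device and release the port's kobject, which frees the port itself via
 * release_nbp().  Invoked from the RCU callback destroy_nbp_rcu().
 */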
static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	netdev_put(dev, &p->dev_tracker);

	kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
			container_of(head, struct net_bridge_port, rcu);
	destroy_nbp(p);
}

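/* Return the largest forwarding headroom required by any enslaved port. */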
static unsigned int get_max_headroom(struct net_bridge *br)
{
	unsigned int max_headroom = 0;
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		unsigned int dev_headroom = netdev_get_fwd_headroom(p->dev);

		if (dev_headroom > max_headroom)
			max_headroom = dev_headroom;
	}

	return max_headroom;
}

static void update_headroom(struct net_bridge *br, int new_hr)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		netdev_set_rx_headroom(p->dev, new_hr);

	br->dev->needed_headroom = new_hr;
}

/* Deleting a port (interface) from the bridge is done in two steps
 * via RCU.  The first step marks the device as down; that deletes
 * all the timers and stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPUs have finished
 * processing packets.
 *
 * Protected from multiple admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_mrp_port_del(br, p);
	br_cfm_port_del(br, p);

	br_ifinfo_notify(RTM_DELLINK, NULL, p);

	list_del_rcu(&p->list);
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();
	nbp_backup_clear(p);

	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	call_rcu(&p->rcu, destroy_nbp_rcu);
}

/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_recalculate_neigh_suppress_enabled(br);

	br_fdb_delete_by_port(br, NULL, 0, 1);

	cancel_delayed_work_sync(&br->gc_work);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}

/* find an available port number */
static int find_portno(struct net_bridge *br)
{
	int index;
	struct net_bridge_port *p;
	unsigned long *inuse;

	inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	__set_bit(0, inuse);	/* zero is reserved */
	list_for_each_entry(p, &br->port_list, list)
		__set_bit(p->port_no, inuse);

	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	bitmap_free(inuse);

	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
	br_init_port(p);
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		netdev_put(dev, &p->dev_tracker);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}

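/* Allocate and register a new bridge device named @name in namespace @net.
 * The caller must hold RTNL, as register_netdevice() requires it.
 */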
int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);

	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdevice(dev);
	if (res)
		free_netdev(dev);
	return res;
}

int br_del_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = 0;

	dev = __dev_get_by_name(net, name);
	if (dev == NULL)
		ret = -ENXIO;	/* Could not find device */

	else if (!netif_is_bridge_master(dev)) {
		/* Attempt to delete non bridge device! */
		ret = -EPERM;
	}

	else if (dev->flags & IFF_UP) {
		/* Not shutdown yet. */
		ret = -EBUSY;
	}

	else
		br_dev_delete(dev, NULL);

	return ret;
}

/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
static int br_mtu_min(const struct net_bridge *br)
{
	const struct net_bridge_port *p;
	int ret_mtu = 0;

	list_for_each_entry(p, &br->port_list, list)
		if (!ret_mtu || ret_mtu > p->dev->mtu)
			ret_mtu = p->dev->mtu;

	return ret_mtu ? ret_mtu : ETH_DATA_LEN;
}

void br_mtu_auto_adjust(struct net_bridge *br)
{
	ASSERT_RTNL();

	/* if the bridge MTU was manually configured don't mess with it */
	if (br_opt_get(br, BROPT_MTU_SET_BY_USER))
		return;

	/* change to the minimum MTU and clear the flag which was set by
	 * the bridge ndo_change_mtu callback
	 */
	dev_set_mtu(br->dev, br_mtu_min(br));
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
}

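/* Clamp the bridge device's TSO limits to the smallest tso_max_size and
 * tso_max_segs advertised by any enslaved port, so offloaded segments can
 * be transmitted by every port.
 */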
static void br_set_gso_limits(struct net_bridge *br)
{
	unsigned int tso_max_size = TSO_MAX_SIZE;
	const struct net_bridge_port *p;
	u16 tso_max_segs = TSO_MAX_SEGS;

	list_for_each_entry(p, &br->port_list, list) {
		tso_max_size = min(tso_max_size, p->dev->tso_max_size);
		tso_max_segs = min(tso_max_segs, p->dev->tso_max_segs);
	}
	netif_set_tso_max_size(br->dev, tso_max_size);
	netif_set_tso_max_segs(br->dev, tso_max_segs);
}

/*
 * Recompute features using the slaves' features
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
	netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
	      struct netlink_ext_ack *extack)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned int br_hr, dev_hr;
	bool changed_addr, fdb_synced = false;

	/* Don't allow bridging non-Ethernet-like devices. */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr))
		return -EINVAL;

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave a bridge to a bridge");
		return -ELOOP;
	}

	/* Device has master upper dev */
	if (netdev_master_upper_dev_get(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE) {
		NL_SET_ERR_MSG(extack,
			       "Device does not allow enslaving to a bridge");
		return -EOPNOTSUPP;
	}

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err) {
		br_multicast_del_port(p);
		netdev_put(dev, &p->dev_tracker);
		kfree(p);	/* kobject not yet init'd, manually free */
		goto err1;
	}

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err2;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_get_rx_handler(dev), p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	if (err)
		goto err5;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);
	if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
		/* When updating the port count we also update all ports'
		 * promiscuous mode.
		 * A port leaving promiscuous mode normally gets the bridge's
		 * fdb synced to the unicast filter (if supported), however,
		 * `br_port_clear_promisc` does not distinguish between
		 * non-promiscuous ports and *new* ports, so we need to
		 * sync explicitly here.
		 */
		fdb_synced = br_fdb_sync_static(br, p) == 0;
		if (!fdb_synced)
			netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
	}

	netdev_update_features(br->dev);

	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_add_local(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed insert local address bridge forwarding table\n");

	if (br->dev->addr_assign_type != NET_ADDR_SET) {
		/* Ask for permission to use this MAC address now, even if we
		 * don't end up choosing it below.
		 */
		err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
		if (err)
			goto err6;
	}

	err = nbp_vlan_init(p, extack);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err6;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, NULL, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err6:
	if (fdb_synced)
		br_fdb_unsync_static(br, p);
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	br_multicast_del_port(p);
	netdev_put(dev, &p->dev_tracker);
	kobject_put(&p->kobj);
	dev_set_allmulti(dev, -1);
err1:
	return err;
}

/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge,
	 * there may still be an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
	del_nbp(p);

	br_mtu_auto_adjust(br);
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}

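/* Called after flags in @mask changed on port @p: refresh the automatic
 * port count when BR_AUTO_MASK flags changed and re-evaluate neighbor
 * suppression when BR_NEIGH_SUPPRESS toggled.
 */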
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);

	if (mask & BR_NEIGH_SUPPRESS)
		br_recalculate_neigh_suppress_enabled(br);
}

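/* Return true if @dev is a bridge port with @flag set.  The caller must
 * hold RTNL or the RCU read lock (see br_port_get_rtnl_rcu()).
 */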
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
	struct net_bridge_port *p;

	p = br_port_get_rtnl_rcu(dev);
	if (!p)
		return false;

	return p->flags & flag;
}
EXPORT_SYMBOL_GPL(br_port_flag_is_set);
777