xref: /freebsd/sys/net/if_bridge.c (revision 1f88aa09417f1cfb3929fd37531b1ab51213c2d6)
1 /*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * Copyright 2001 Wasabi Systems, Inc.
7  * All rights reserved.
8  *
9  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed for the NetBSD Project by
22  *	Wasabi Systems, Inc.
23  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
24  *    or promote products derived from this software without specific prior
25  *    written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
42  * All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
55  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
56  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
57  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
61  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
62  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63  * POSSIBILITY OF SUCH DAMAGE.
64  *
65  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
66  */
67 
68 /*
69  * Network interface bridge support.
70  *
71  * TODO:
72  *
73  *	- Currently only supports Ethernet-like interfaces (Ethernet,
74  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
75  *	  to bridge other types of interfaces (maybe consider
76  *	  heterogeneous bridges).
77  */
78 
79 #include <sys/cdefs.h>
80 __FBSDID("$FreeBSD$");
81 
82 #include "opt_inet.h"
83 #include "opt_inet6.h"
84 
85 #include <sys/param.h>
86 #include <sys/eventhandler.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/protosw.h>
90 #include <sys/systm.h>
91 #include <sys/jail.h>
92 #include <sys/time.h>
93 #include <sys/socket.h> /* for net/if.h */
94 #include <sys/sockio.h>
95 #include <sys/ctype.h>  /* string functions */
96 #include <sys/kernel.h>
97 #include <sys/random.h>
98 #include <sys/syslog.h>
99 #include <sys/sysctl.h>
100 #include <vm/uma.h>
101 #include <sys/module.h>
102 #include <sys/priv.h>
103 #include <sys/proc.h>
104 #include <sys/lock.h>
105 #include <sys/mutex.h>
106 
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_clone.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_var.h>
113 #include <net/pfil.h>
114 #include <net/vnet.h>
115 
116 #include <netinet/in.h>
117 #include <netinet/in_systm.h>
118 #include <netinet/in_var.h>
119 #include <netinet/ip.h>
120 #include <netinet/ip_var.h>
121 #ifdef INET6
122 #include <netinet/ip6.h>
123 #include <netinet6/ip6_var.h>
124 #include <netinet6/in6_ifattach.h>
125 #endif
126 #if defined(INET) || defined(INET6)
127 #include <netinet/ip_carp.h>
128 #endif
129 #include <machine/in_cksum.h>
130 #include <netinet/if_ether.h>
131 #include <net/bridgestp.h>
132 #include <net/if_bridgevar.h>
133 #include <net/if_llc.h>
134 #include <net/if_vlan_var.h>
135 
136 #include <net/route.h>
137 
138 #ifdef INET6
139 /*
140  * XXX: declared here to avoid pulling in many inet6-related headers;
141  * this should probably be generalized.
142  */
143 extern void	nd6_setmtu(struct ifnet *);
144 #endif
145 
146 /*
147  * Size of the route hash table.  Must be a power of two.
148  */
149 #ifndef BRIDGE_RTHASH_SIZE
150 #define	BRIDGE_RTHASH_SIZE		1024
151 #endif
152 
153 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
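/*
 * Editorial note (not in the original source): the power-of-two
 * requirement lets the hash value be reduced with a cheap bitwise AND
 * instead of a modulo, e.g. (sketch):
 *
 *	idx = hash & BRIDGE_RTHASH_MASK;
 *
 * which is equivalent to "hash % BRIDGE_RTHASH_SIZE" only when the
 * table size is a power of two.
 */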
154 
155 /*
156  * Default maximum number of addresses to cache.
157  */
158 #ifndef BRIDGE_RTABLE_MAX
159 #define	BRIDGE_RTABLE_MAX		2000
160 #endif
161 
162 /*
163  * Timeout (in seconds) for entries learned dynamically.
164  */
165 #ifndef BRIDGE_RTABLE_TIMEOUT
166 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
167 #endif
168 
169 /*
170  * Number of seconds between walks of the route list.
171  */
172 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
173 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
174 #endif
175 
176 /*
177  * List of capabilities to possibly mask on the member interface.
178  */
179 #define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
180 					 IFCAP_TXCSUM_IPV6)
181 
182 /*
183  * List of capabilities to strip
184  */
185 #define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO
186 
187 /*
188  * Bridge locking
189  *
190  * The bridge relies heavily on the epoch(9) system to protect its data
191  * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
192  * must ensure there is only one writer at a time.
193  *
194  * That is: for read accesses we only need to be in NET_EPOCH, but for write
195  * accesses we must hold:
196  *
197  *  - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
198  *  - BRIDGE_LOCK, for any other change
199  *
200  * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
201  * calls to bridge member interfaces and these ioctl()s can sleep.
202  * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
203  * required while we are within NET_EPOCH, where sleeping is not allowed.
204  */
205 #define BRIDGE_LOCK_INIT(_sc)		do {			\
206 	sx_init(&(_sc)->sc_sx, "if_bridge");			\
207 	mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF);	\
208 } while (0)
209 #define BRIDGE_LOCK_DESTROY(_sc)	do {	\
210 	sx_destroy(&(_sc)->sc_sx);		\
211 	mtx_destroy(&(_sc)->sc_rt_mtx);		\
212 } while (0)
213 #define BRIDGE_LOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
214 #define BRIDGE_UNLOCK(_sc)		sx_xunlock(&(_sc)->sc_sx)
215 #define BRIDGE_LOCK_ASSERT(_sc)		sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
216 #define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
217 	    MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
218 #define BRIDGE_UNLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
219 #define BRIDGE_RT_LOCK(_sc)		mtx_lock(&(_sc)->sc_rt_mtx)
220 #define BRIDGE_RT_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_rt_mtx)
221 #define BRIDGE_RT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
222 #define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
223 	    MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
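/*
 * Editorial sketch (not in the original source) of the locking rules
 * described above; "et" and "bif" are illustrative locals only:
 *
 *	struct epoch_tracker et;
 *
 *	NET_EPOCH_ENTER(et);			read-only traversal
 *	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
 *		...
 *	NET_EPOCH_EXIT(et);
 *
 *	BRIDGE_LOCK(sc);			modifying the member list
 *	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
 *	BRIDGE_UNLOCK(sc);
 *
 * Changes to the route table (bridge_rtnodes) take BRIDGE_RT_LOCK()
 * instead, since that path may run while inside NET_EPOCH.
 */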
224 
225 /*
226  * Bridge interface list entry.
227  */
228 struct bridge_iflist {
229 	CK_LIST_ENTRY(bridge_iflist) bif_next;
230 	struct ifnet		*bif_ifp;	/* member if */
231 	struct bstp_port	bif_stp;	/* STP state */
232 	uint32_t		bif_flags;	/* member if flags */
233 	int			bif_savedcaps;	/* saved capabilities */
234 	uint32_t		bif_addrmax;	/* max # of addresses */
235 	uint32_t		bif_addrcnt;	/* cur. # of addresses */
236 	uint32_t		bif_addrexceeded;/* # of address violations */
237 	struct epoch_context	bif_epoch_ctx;
238 };
239 
240 /*
241  * Bridge route node.
242  */
243 struct bridge_rtnode {
244 	CK_LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
245 	CK_LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
246 	struct bridge_iflist	*brt_dst;	/* destination if */
247 	unsigned long		brt_expire;	/* expiration time */
248 	uint8_t			brt_flags;	/* address flags */
249 	uint8_t			brt_addr[ETHER_ADDR_LEN];
250 	uint16_t		brt_vlan;	/* vlan id */
251 	struct	vnet		*brt_vnet;
252 	struct	epoch_context	brt_epoch_ctx;
253 };
254 #define	brt_ifp			brt_dst->bif_ifp
255 
256 /*
257  * Software state for each bridge.
258  */
259 struct bridge_softc {
260 	struct ifnet		*sc_ifp;	/* the bridge's own interface */
261 	LIST_ENTRY(bridge_softc) sc_list;
262 	struct sx		sc_sx;
263 	struct mtx		sc_rt_mtx;
264 	uint32_t		sc_brtmax;	/* max # of addresses */
265 	uint32_t		sc_brtcnt;	/* cur. # of addresses */
266 	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
267 	struct callout		sc_brcallout;	/* bridge callout */
268 	CK_LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
269 	CK_LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
270 	CK_LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
271 	uint32_t		sc_rthash_key;	/* key for hash */
272 	CK_LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
273 	struct bstp_state	sc_stp;		/* STP state */
274 	uint32_t		sc_brtexceeded;	/* # of cache drops */
275 	struct ifnet		*sc_ifaddr;	/* member mac copied from */
276 	struct ether_addr	sc_defaddr;	/* Default MAC address */
277 	struct epoch_context	sc_epoch_ctx;
278 };
279 
280 VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
281 #define	V_bridge_list_sx	VNET(bridge_list_sx)
282 static eventhandler_tag bridge_detach_cookie;
283 
284 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
285 
286 VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
287 #define	V_bridge_rtnode_zone	VNET(bridge_rtnode_zone)
288 
289 static int	bridge_clone_create(struct if_clone *, int, caddr_t);
290 static void	bridge_clone_destroy(struct ifnet *);
291 
292 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
293 static void	bridge_mutecaps(struct bridge_softc *);
294 static void	bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
295 		    int);
296 static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
297 static void	bridge_init(void *);
298 static void	bridge_dummynet(struct mbuf *, struct ifnet *);
299 static void	bridge_stop(struct ifnet *, int);
300 static int	bridge_transmit(struct ifnet *, struct mbuf *);
301 static void	bridge_qflush(struct ifnet *);
302 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
303 static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
304 		    struct rtentry *);
305 static int	bridge_enqueue(struct bridge_softc *, struct ifnet *,
306 		    struct mbuf *);
307 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
308 
309 static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
310 		    struct mbuf *m);
311 
312 static void	bridge_timer(void *);
313 
314 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
315 		    struct mbuf *, int);
316 static void	bridge_span(struct bridge_softc *, struct mbuf *);
317 
318 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
319 		    uint16_t, struct bridge_iflist *, int, uint8_t);
320 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
321 		    uint16_t);
322 static void	bridge_rttrim(struct bridge_softc *);
323 static void	bridge_rtage(struct bridge_softc *);
324 static void	bridge_rtflush(struct bridge_softc *, int);
325 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
326 		    uint16_t);
327 
328 static void	bridge_rtable_init(struct bridge_softc *);
329 static void	bridge_rtable_fini(struct bridge_softc *);
330 
331 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
332 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
333 		    const uint8_t *, uint16_t);
334 static int	bridge_rtnode_insert(struct bridge_softc *,
335 		    struct bridge_rtnode *);
336 static void	bridge_rtnode_destroy(struct bridge_softc *,
337 		    struct bridge_rtnode *);
338 static void	bridge_rtable_expire(struct ifnet *, int);
339 static void	bridge_state_change(struct ifnet *, int);
340 
341 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
342 		    const char *name);
343 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
344 		    struct ifnet *ifp);
345 static void	bridge_delete_member(struct bridge_softc *,
346 		    struct bridge_iflist *, int);
347 static void	bridge_delete_span(struct bridge_softc *,
348 		    struct bridge_iflist *);
349 
350 static int	bridge_ioctl_add(struct bridge_softc *, void *);
351 static int	bridge_ioctl_del(struct bridge_softc *, void *);
352 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
353 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
354 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
355 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
356 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
357 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
358 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
359 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
360 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
361 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
362 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
363 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
364 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
365 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
366 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
367 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
368 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
369 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
370 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
371 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
372 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
373 static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
374 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
375 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
376 static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
377 static int	bridge_ioctl_grte(struct bridge_softc *, void *);
378 static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
379 static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
380 static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
381 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
382 		    int);
383 static int	bridge_ip_checkbasic(struct mbuf **mp);
384 #ifdef INET6
385 static int	bridge_ip6_checkbasic(struct mbuf **mp);
386 #endif /* INET6 */
387 static int	bridge_fragment(struct ifnet *, struct mbuf **mp,
388 		    struct ether_header *, int, struct llc *);
389 static void	bridge_linkstate(struct ifnet *ifp);
390 static void	bridge_linkcheck(struct bridge_softc *sc);
391 
392 /* The default bridge VLAN is 1 (IEEE 802.1Q-2003 Table 9-2) */
393 #define	VLANTAGOF(_m)	\
394     (((_m)->m_flags & M_VLANTAG) ? EVL_VLANOFTAG((_m)->m_pkthdr.ether_vtag) : 1)
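/*
 * Editorial example (not in the original source): an untagged frame has
 * M_VLANTAG clear, so VLANTAGOF() yields the default VLAN 1; a frame
 * whose mbuf carries ether_vtag 100 yields EVL_VLANOFTAG(100) == 100.
 */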
395 
396 static struct bstp_cb_ops bridge_ops = {
397 	.bcb_state = bridge_state_change,
398 	.bcb_rtage = bridge_rtable_expire
399 };
400 
401 SYSCTL_DECL(_net_link);
402 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
403     "Bridge");
404 
405 /* only pass IP[46] packets when pfil is enabled */
406 VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
407 #define	V_pfil_onlyip	VNET(pfil_onlyip)
408 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
409     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
410     "Only pass IP packets when pfil is enabled");
411 
412 /* run pfil hooks on the bridge interface */
413 VNET_DEFINE_STATIC(int, pfil_bridge) = 1;
414 #define	V_pfil_bridge	VNET(pfil_bridge)
415 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
416     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
417     "Packet filter on the bridge interface");
418 
419 /* layer2 filter with ipfw */
420 VNET_DEFINE_STATIC(int, pfil_ipfw);
421 #define	V_pfil_ipfw	VNET(pfil_ipfw)
422 
423 /* layer2 ARP filter with ipfw */
424 VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
425 #define	V_pfil_ipfw_arp	VNET(pfil_ipfw_arp)
426 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
427     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
428     "Filter ARP packets through IPFW layer2");
429 
430 /* run pfil hooks on the member interface */
431 VNET_DEFINE_STATIC(int, pfil_member) = 1;
432 #define	V_pfil_member	VNET(pfil_member)
433 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
434     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
435     "Packet filter on the member interface");
436 
437 /* run pfil hooks on the physical interface for locally destined packets */
438 VNET_DEFINE_STATIC(int, pfil_local_phys);
439 #define	V_pfil_local_phys	VNET(pfil_local_phys)
440 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
441     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
442     "Packet filter on the physical interface for locally destined packets");
443 
444 /* log STP state changes */
445 VNET_DEFINE_STATIC(int, log_stp);
446 #define	V_log_stp	VNET(log_stp)
447 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
448     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
449     "Log STP state changes");
450 
451 /* share MAC with first bridge member */
452 VNET_DEFINE_STATIC(int, bridge_inherit_mac);
453 #define	V_bridge_inherit_mac	VNET(bridge_inherit_mac)
454 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
455     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
456     "Inherit MAC address from the first bridge member");
457 
458 VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
459 #define	V_allow_llz_overlap	VNET(allow_llz_overlap)
460 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
461     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
462     "Allow overlap of link-local scope "
463     "zones of a bridge interface and the member interfaces");
464 
465 struct bridge_control {
466 	int	(*bc_func)(struct bridge_softc *, void *);
467 	int	bc_argsize;
468 	int	bc_flags;
469 };
470 
471 #define	BC_F_COPYIN		0x01	/* copy arguments in */
472 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
473 #define	BC_F_SUSER		0x04	/* do super-user check */
474 
475 const struct bridge_control bridge_control_table[] = {
476 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
477 	  BC_F_COPYIN|BC_F_SUSER },
478 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
479 	  BC_F_COPYIN|BC_F_SUSER },
480 
481 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
482 	  BC_F_COPYIN|BC_F_COPYOUT },
483 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
484 	  BC_F_COPYIN|BC_F_SUSER },
485 
486 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
487 	  BC_F_COPYIN|BC_F_SUSER },
488 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
489 	  BC_F_COPYOUT },
490 
491 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
492 	  BC_F_COPYIN|BC_F_COPYOUT },
493 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
494 	  BC_F_COPYIN|BC_F_COPYOUT },
495 
496 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
497 	  BC_F_COPYIN|BC_F_SUSER },
498 
499 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
500 	  BC_F_COPYIN|BC_F_SUSER },
501 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
502 	  BC_F_COPYOUT },
503 
504 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
505 	  BC_F_COPYIN|BC_F_SUSER },
506 
507 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
508 	  BC_F_COPYIN|BC_F_SUSER },
509 
510 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
511 	  BC_F_COPYOUT },
512 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
513 	  BC_F_COPYIN|BC_F_SUSER },
514 
515 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
516 	  BC_F_COPYOUT },
517 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
518 	  BC_F_COPYIN|BC_F_SUSER },
519 
520 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
521 	  BC_F_COPYOUT },
522 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
523 	  BC_F_COPYIN|BC_F_SUSER },
524 
525 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
526 	  BC_F_COPYOUT },
527 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
528 	  BC_F_COPYIN|BC_F_SUSER },
529 
530 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
531 	  BC_F_COPYIN|BC_F_SUSER },
532 
533 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
534 	  BC_F_COPYIN|BC_F_SUSER },
535 
536 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
537 	  BC_F_COPYIN|BC_F_SUSER },
538 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
539 	  BC_F_COPYIN|BC_F_SUSER },
540 
541 	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
542 	  BC_F_COPYOUT },
543 
544 	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
545 	  BC_F_COPYOUT },
546 
547 	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
548 	  BC_F_COPYIN|BC_F_COPYOUT },
549 
550 	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
551 	  BC_F_COPYIN|BC_F_SUSER },
552 
553 	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
554 	  BC_F_COPYIN|BC_F_SUSER },
555 
556 	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
557 	  BC_F_COPYIN|BC_F_SUSER },
558 
559 };
560 const int bridge_control_table_size = nitems(bridge_control_table);
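/*
 * Editorial sketch (not in the original source) of how this table is
 * reached from userland, e.g. by ifconfig(8)'s bridge support; the
 * request and variable names below are illustrative only:
 *
 *	struct ifbreq req = { ... };
 *	struct ifdrv ifd = {
 *		.ifd_cmd = 0,			index into the table
 *		.ifd_len = sizeof(req),
 *		.ifd_data = &req,
 *	};
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 *
 * bridge_ioctl() below checks ifd_cmd against bridge_control_table_size
 * and ifd_len against bc_argsize, honours BC_F_COPYIN, BC_F_COPYOUT and
 * BC_F_SUSER, and then calls bc_func.
 */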
561 
562 VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list);
563 #define	V_bridge_list	VNET(bridge_list)
564 #define	BRIDGE_LIST_LOCK_INIT(x)	sx_init(&V_bridge_list_sx,	\
565 					    "if_bridge list")
566 #define	BRIDGE_LIST_LOCK_DESTROY(x)	sx_destroy(&V_bridge_list_sx)
567 #define	BRIDGE_LIST_LOCK(x)		sx_xlock(&V_bridge_list_sx)
568 #define	BRIDGE_LIST_UNLOCK(x)		sx_xunlock(&V_bridge_list_sx)
569 
570 VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
571 #define	V_bridge_cloner	VNET(bridge_cloner)
572 
573 static const char bridge_name[] = "bridge";
574 
575 static void
576 vnet_bridge_init(const void *unused __unused)
577 {
578 
579 	V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
580 	    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
581 	    UMA_ALIGN_PTR, 0);
582 	BRIDGE_LIST_LOCK_INIT();
583 	LIST_INIT(&V_bridge_list);
584 	V_bridge_cloner = if_clone_simple(bridge_name,
585 	    bridge_clone_create, bridge_clone_destroy, 0);
586 }
587 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
588     vnet_bridge_init, NULL);
589 
590 static void
591 vnet_bridge_uninit(const void *unused __unused)
592 {
593 
594 	if_clone_detach(V_bridge_cloner);
595 	V_bridge_cloner = NULL;
596 	BRIDGE_LIST_LOCK_DESTROY();
597 
598 	/* Callbacks may use the UMA zone. */
599 	epoch_drain_callbacks(net_epoch_preempt);
600 
601 	uma_zdestroy(V_bridge_rtnode_zone);
602 }
603 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
604     vnet_bridge_uninit, NULL);
605 
606 static int
607 bridge_modevent(module_t mod, int type, void *data)
608 {
609 
610 	switch (type) {
611 	case MOD_LOAD:
612 		bridge_dn_p = bridge_dummynet;
613 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
614 		    ifnet_departure_event, bridge_ifdetach, NULL,
615 		    EVENTHANDLER_PRI_ANY);
616 		break;
617 	case MOD_UNLOAD:
618 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
619 		    bridge_detach_cookie);
620 		bridge_dn_p = NULL;
621 		break;
622 	default:
623 		return (EOPNOTSUPP);
624 	}
625 	return (0);
626 }
627 
628 static moduledata_t bridge_mod = {
629 	"if_bridge",
630 	bridge_modevent,
631 	0
632 };
633 
634 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
635 MODULE_VERSION(if_bridge, 1);
636 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
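/*
 * Editorial usage note (not in the original source): with this module
 * loaded (kldload if_bridge, or typically auto-loaded on first use),
 * bridges are created through the cloner registered in
 * vnet_bridge_init(), e.g.:
 *
 *	# ifconfig bridge0 create
 *	# ifconfig bridge0 addm em0 addm em1 up
 *
 * The interface names above are examples only.
 */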
637 
638 /*
639  * handler for net.link.bridge.ipfw
640  */
641 static int
642 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
643 {
644 	int enable = V_pfil_ipfw;
645 	int error;
646 
647 	error = sysctl_handle_int(oidp, &enable, 0, req);
648 	enable &= 1;
649 
650 	if (enable != V_pfil_ipfw) {
651 		V_pfil_ipfw = enable;
652 
653 		/*
654 		 * Disable pfil so that ipfw doesn't run twice; if the user
655 		 * really wants both, they can re-enable pfil_bridge and/or
656 		 * pfil_member.  Also allow non-IP packets, as ipfw can filter by
657 		 * layer2 type.
658 		 */
659 		if (V_pfil_ipfw) {
660 			V_pfil_onlyip = 0;
661 			V_pfil_bridge = 0;
662 			V_pfil_member = 0;
663 		}
664 	}
665 
666 	return (error);
667 }
668 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
669     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
670     &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
671     "Layer2 filter with IPFW");
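/*
 * Editorial example (not in the original source) of the handler above:
 *
 *	# sysctl net.link.bridge.ipfw=1
 *
 * also forces pfil_onlyip, pfil_bridge and pfil_member to 0 so that
 * ipfw does not run twice; re-enable those knobs explicitly if both
 * filters are wanted.
 */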
672 
673 #ifdef VIMAGE
674 static void
675 bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
676 {
677 	struct bridge_softc *sc = ifp->if_softc;
678 	struct bridge_iflist *bif;
679 
680 	BRIDGE_LOCK(sc);
681 
682 	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
683 		bridge_delete_member(sc, bif, 0);
684 
685 	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
686 		bridge_delete_span(sc, bif);
687 	}
688 
689 	BRIDGE_UNLOCK(sc);
690 
691 	ether_reassign(ifp, newvnet, arg);
692 }
693 #endif
694 
695 /*
696  * bridge_clone_create:
697  *
698  *	Create a new bridge instance.
699  */
700 static int
701 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
702 {
703 	struct bridge_softc *sc;
704 	struct ifnet *ifp;
705 
706 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
707 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
708 	if (ifp == NULL) {
709 		free(sc, M_DEVBUF);
710 		return (ENOSPC);
711 	}
712 
713 	BRIDGE_LOCK_INIT(sc);
714 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
715 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
716 
717 	/* Initialize our routing table. */
718 	bridge_rtable_init(sc);
719 
720 	callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
721 
722 	CK_LIST_INIT(&sc->sc_iflist);
723 	CK_LIST_INIT(&sc->sc_spanlist);
724 
725 	ifp->if_softc = sc;
726 	if_initname(ifp, bridge_name, unit);
727 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
728 	ifp->if_ioctl = bridge_ioctl;
729 	ifp->if_transmit = bridge_transmit;
730 	ifp->if_qflush = bridge_qflush;
731 	ifp->if_init = bridge_init;
732 	ifp->if_type = IFT_BRIDGE;
733 
734 	ether_gen_addr(ifp, &sc->sc_defaddr);
735 
736 	bstp_attach(&sc->sc_stp, &bridge_ops);
737 	ether_ifattach(ifp, sc->sc_defaddr.octet);
738 	/* Now undo some of the damage... */
739 	ifp->if_baudrate = 0;
740 	ifp->if_type = IFT_BRIDGE;
741 #ifdef VIMAGE
742 	ifp->if_reassign = bridge_reassign;
743 #endif
744 
745 	BRIDGE_LIST_LOCK();
746 	LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
747 	BRIDGE_LIST_UNLOCK();
748 
749 	return (0);
750 }
751 
752 static void
753 bridge_clone_destroy_cb(struct epoch_context *ctx)
754 {
755 	struct bridge_softc *sc;
756 
757 	sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
758 
759 	BRIDGE_LOCK_DESTROY(sc);
760 	free(sc, M_DEVBUF);
761 }
762 
763 /*
764  * bridge_clone_destroy:
765  *
766  *	Destroy a bridge instance.
767  */
768 static void
769 bridge_clone_destroy(struct ifnet *ifp)
770 {
771 	struct bridge_softc *sc = ifp->if_softc;
772 	struct bridge_iflist *bif;
773 	struct epoch_tracker et;
774 
775 	BRIDGE_LOCK(sc);
776 
777 	bridge_stop(ifp, 1);
778 	ifp->if_flags &= ~IFF_UP;
779 
780 	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
781 		bridge_delete_member(sc, bif, 0);
782 
783 	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
784 		bridge_delete_span(sc, bif);
785 	}
786 
787 	/* Tear down the routing table. */
788 	bridge_rtable_fini(sc);
789 
790 	BRIDGE_UNLOCK(sc);
791 
792 	NET_EPOCH_ENTER(et);
793 
794 	callout_drain(&sc->sc_brcallout);
795 
796 	BRIDGE_LIST_LOCK();
797 	LIST_REMOVE(sc, sc_list);
798 	BRIDGE_LIST_UNLOCK();
799 
800 	bstp_detach(&sc->sc_stp);
801 	NET_EPOCH_EXIT(et);
802 
803 	ether_ifdetach(ifp);
804 	if_free(ifp);
805 
806 	NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
807 }
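/*
 * Editorial note (not in the original source): the softc is not freed
 * here; it is handed to NET_EPOCH_CALL() above so that
 * bridge_clone_destroy_cb() destroys the locks and frees it only after
 * all in-flight epoch readers have finished.
 */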
808 
809 /*
810  * bridge_ioctl:
811  *
812  *	Handle a control request from the operator.
813  */
814 static int
815 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
816 {
817 	struct bridge_softc *sc = ifp->if_softc;
818 	struct ifreq *ifr = (struct ifreq *)data;
819 	struct bridge_iflist *bif;
820 	struct thread *td = curthread;
821 	union {
822 		struct ifbreq ifbreq;
823 		struct ifbifconf ifbifconf;
824 		struct ifbareq ifbareq;
825 		struct ifbaconf ifbaconf;
826 		struct ifbrparam ifbrparam;
827 		struct ifbropreq ifbropreq;
828 	} args;
829 	struct ifdrv *ifd = (struct ifdrv *) data;
830 	const struct bridge_control *bc;
831 	int error = 0, oldmtu;
832 
833 	BRIDGE_LOCK(sc);
834 
835 	switch (cmd) {
836 	case SIOCADDMULTI:
837 	case SIOCDELMULTI:
838 		break;
839 
840 	case SIOCGDRVSPEC:
841 	case SIOCSDRVSPEC:
842 		if (ifd->ifd_cmd >= bridge_control_table_size) {
843 			error = EINVAL;
844 			break;
845 		}
846 		bc = &bridge_control_table[ifd->ifd_cmd];
847 
848 		if (cmd == SIOCGDRVSPEC &&
849 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
850 			error = EINVAL;
851 			break;
852 		}
853 		else if (cmd == SIOCSDRVSPEC &&
854 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
855 			error = EINVAL;
856 			break;
857 		}
858 
859 		if (bc->bc_flags & BC_F_SUSER) {
860 			error = priv_check(td, PRIV_NET_BRIDGE);
861 			if (error)
862 				break;
863 		}
864 
865 		if (ifd->ifd_len != bc->bc_argsize ||
866 		    ifd->ifd_len > sizeof(args)) {
867 			error = EINVAL;
868 			break;
869 		}
870 
871 		bzero(&args, sizeof(args));
872 		if (bc->bc_flags & BC_F_COPYIN) {
873 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
874 			if (error)
875 				break;
876 		}
877 
878 		oldmtu = ifp->if_mtu;
879 		error = (*bc->bc_func)(sc, &args);
880 		if (error)
881 			break;
882 
883 		/*
884 		 * Bridge MTU may change during addition of the first port.
885 		 * If it did, perform the network-layer-specific adjustments.
886 		 */
887 		if (ifp->if_mtu != oldmtu) {
888 #ifdef INET6
889 			nd6_setmtu(ifp);
890 #endif
891 			rt_updatemtu(ifp);
892 		}
893 
894 		if (bc->bc_flags & BC_F_COPYOUT)
895 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
896 
897 		break;
898 
899 	case SIOCSIFFLAGS:
900 		if (!(ifp->if_flags & IFF_UP) &&
901 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
902 			/*
903 			 * If interface is marked down and it is running,
904 			 * then stop and disable it.
905 			 */
906 			bridge_stop(ifp, 1);
907 		} else if ((ifp->if_flags & IFF_UP) &&
908 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
909 			/*
910 			 * If interface is marked up and it is stopped, then
911 			 * start it.
912 			 */
913 			BRIDGE_UNLOCK(sc);
914 			(*ifp->if_init)(sc);
915 			BRIDGE_LOCK(sc);
916 		}
917 		break;
918 
919 	case SIOCSIFMTU:
920 		oldmtu = sc->sc_ifp->if_mtu;
921 
922 		if (ifr->ifr_mtu < 576) {
923 			error = EINVAL;
924 			break;
925 		}
926 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
927 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
928 			break;
929 		}
930 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
931 			error = (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
932 			    SIOCSIFMTU, (caddr_t)ifr);
933 			if (error != 0) {
934 				log(LOG_NOTICE, "%s: invalid MTU: %u for"
935 				    " member %s\n", sc->sc_ifp->if_xname,
936 				    ifr->ifr_mtu,
937 				    bif->bif_ifp->if_xname);
938 				error = EINVAL;
939 				break;
940 			}
941 		}
942 		if (error) {
943 			/* Restore the previous MTU on all member interfaces. */
944 			ifr->ifr_mtu = oldmtu;
945 			CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
946 				(*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
947 				    SIOCSIFMTU, (caddr_t)ifr);
948 			}
949 		} else {
950 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
951 		}
952 		break;
953 	default:
954 		/*
955 		 * Drop the lock, as ether_ioctl() may call back into the
956 		 * bridge code and recurse on the lock.
957 		 */
958 		BRIDGE_UNLOCK(sc);
959 		error = ether_ioctl(ifp, cmd, data);
960 		BRIDGE_LOCK(sc);
961 		break;
962 	}
963 
964 	BRIDGE_UNLOCK(sc);
965 
966 	return (error);
967 }
968 
969 /*
970  * bridge_mutecaps:
971  *	Clear or restore unwanted capabilities on the member interfaces.
972  *	Clear or restore unwanted capabilities on the member interface
973  */
974 static void
975 bridge_mutecaps(struct bridge_softc *sc)
976 {
977 	struct bridge_iflist *bif;
978 	int enabled, mask;
979 
980 	BRIDGE_LOCK_ASSERT(sc);
981 
982 	/* Initial bitmask of capabilities to test */
983 	mask = BRIDGE_IFCAPS_MASK;
984 
985 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
986 		/* Every member must support it or it is disabled */
987 		mask &= bif->bif_savedcaps;
988 	}
989 
990 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
991 		enabled = bif->bif_ifp->if_capenable;
992 		enabled &= ~BRIDGE_IFCAPS_STRIP;
993 		/* strip off mask bits and enable them again if allowed */
994 		enabled &= ~BRIDGE_IFCAPS_MASK;
995 		enabled |= mask;
996 		bridge_set_ifcap(sc, bif, enabled);
997 	}
998 }
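/*
 * Editorial example (not in the original source): if one member's saved
 * capabilities include TXCSUM|TSO but another member only offers
 * TXCSUM, the surviving mask is TXCSUM, so TSO is switched off on every
 * member; capabilities in BRIDGE_IFCAPS_STRIP (LRO) are always cleared.
 */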
999 
1000 static void
1001 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1002 {
1003 	struct ifnet *ifp = bif->bif_ifp;
1004 	struct ifreq ifr;
1005 	int error, mask, stuck;
1006 
1007 	bzero(&ifr, sizeof(ifr));
1008 	ifr.ifr_reqcap = set;
1009 
1010 	if (ifp->if_capenable != set) {
1011 		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1012 		if (error)
1013 			if_printf(sc->sc_ifp,
1014 			    "error setting capabilities on %s: %d\n",
1015 			    ifp->if_xname, error);
1016 		mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
1017 		stuck = ifp->if_capenable & mask & ~set;
1018 		if (stuck != 0)
1019 			if_printf(sc->sc_ifp,
1020 			    "can't disable some capabilities on %s: 0x%x\n",
1021 			    ifp->if_xname, stuck);
1022 	}
1023 }
1024 
1025 /*
1026  * bridge_lookup_member:
1027  *
1028  *	Lookup a bridge member interface.
1029  */
1030 static struct bridge_iflist *
1031 bridge_lookup_member(struct bridge_softc *sc, const char *name)
1032 {
1033 	struct bridge_iflist *bif;
1034 	struct ifnet *ifp;
1035 
1036 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1037 
1038 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1039 		ifp = bif->bif_ifp;
1040 		if (strcmp(ifp->if_xname, name) == 0)
1041 			return (bif);
1042 	}
1043 
1044 	return (NULL);
1045 }
1046 
1047 /*
1048  * bridge_lookup_member_if:
1049  *
1050  *	Lookup a bridge member interface by ifnet*.
1051  */
1052 static struct bridge_iflist *
1053 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1054 {
1055 	struct bridge_iflist *bif;
1056 
1057 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1058 
1059 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1060 		if (bif->bif_ifp == member_ifp)
1061 			return (bif);
1062 	}
1063 
1064 	return (NULL);
1065 }
1066 
1067 static void
1068 bridge_delete_member_cb(struct epoch_context *ctx)
1069 {
1070 	struct bridge_iflist *bif;
1071 
1072 	bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1073 
1074 	free(bif, M_DEVBUF);
1075 }
1076 
1077 /*
1078  * bridge_delete_member:
1079  *
1080  *	Delete the specified member interface.
1081  */
1082 static void
1083 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1084     int gone)
1085 {
1086 	struct ifnet *ifs = bif->bif_ifp;
1087 	struct ifnet *fif = NULL;
1088 	struct bridge_iflist *bifl;
1089 
1090 	BRIDGE_LOCK_ASSERT(sc);
1091 
1092 	if (bif->bif_flags & IFBIF_STP)
1093 		bstp_disable(&bif->bif_stp);
1094 
1095 	ifs->if_bridge = NULL;
1096 	CK_LIST_REMOVE(bif, bif_next);
1097 
1098 	/*
1099 	 * If removing the interface that gave the bridge its MAC address, set
1100 	 * the MAC address of the bridge to the address of the next member, or
1101 	 * to its default address if no members are left.
1102 	 */
1103 	if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1104 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1105 			bcopy(&sc->sc_defaddr,
1106 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1107 			sc->sc_ifaddr = NULL;
1108 		} else {
1109 			bifl = CK_LIST_FIRST(&sc->sc_iflist);
1110 			fif = bifl->bif_ifp;
1111 			bcopy(IF_LLADDR(fif),
1112 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1113 			sc->sc_ifaddr = fif;
1114 		}
1115 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1116 	}
1117 
1118 	bridge_linkcheck(sc);
1119 	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
1120 	BRIDGE_RT_LOCK(sc);
1121 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1122 	BRIDGE_RT_UNLOCK(sc);
1123 	KASSERT(bif->bif_addrcnt == 0,
1124 	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1125 
1126 	ifs->if_bridge_output = NULL;
1127 	ifs->if_bridge_input = NULL;
1128 	ifs->if_bridge_linkstate = NULL;
1129 	if (!gone) {
1130 		switch (ifs->if_type) {
1131 		case IFT_ETHER:
1132 		case IFT_L2VLAN:
1133 			/*
1134 			 * Take the interface out of promiscuous mode, but only
1135 			 * if it was promiscuous in the first place. It might
1136 			 * not be if we're in the bridge_ioctl_add() error path.
1137 			 */
1138 			if (ifs->if_flags & IFF_PROMISC)
1139 				(void) ifpromisc(ifs, 0);
1140 			break;
1141 
1142 		case IFT_GIF:
1143 			break;
1144 
1145 		default:
1146 #ifdef DIAGNOSTIC
1147 			panic("bridge_delete_member: impossible");
1148 #endif
1149 			break;
1150 		}
1151 		/* re-enable any interface capabilities */
1152 		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1153 	}
1154 	bstp_destroy(&bif->bif_stp);	/* prepare to free */
1155 
1156 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1157 }
1158 
1159 /*
1160  * bridge_delete_span:
1161  *
1162  *	Delete the specified span interface.
1163  */
1164 static void
1165 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1166 {
1167 	BRIDGE_LOCK_ASSERT(sc);
1168 
1169 	KASSERT(bif->bif_ifp->if_bridge == NULL,
1170 	    ("%s: not a span interface", __func__));
1171 
1172 	CK_LIST_REMOVE(bif, bif_next);
1173 
1174 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1175 }
1176 
1177 static int
1178 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1179 {
1180 	struct ifbreq *req = arg;
1181 	struct bridge_iflist *bif = NULL;
1182 	struct ifnet *ifs;
1183 	int error = 0;
1184 
1185 	ifs = ifunit(req->ifbr_ifsname);
1186 	if (ifs == NULL)
1187 		return (ENOENT);
1188 	if (ifs->if_ioctl == NULL)	/* must be supported */
1189 		return (EINVAL);
1190 
1191 	/* If it's in the span list, it can't be a member. */
1192 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1193 		if (ifs == bif->bif_ifp)
1194 			return (EBUSY);
1195 
1196 	if (ifs->if_bridge == sc)
1197 		return (EEXIST);
1198 
1199 	if (ifs->if_bridge != NULL)
1200 		return (EBUSY);
1201 
1202 	switch (ifs->if_type) {
1203 	case IFT_ETHER:
1204 	case IFT_L2VLAN:
1205 	case IFT_GIF:
1206 		/* permitted interface types */
1207 		break;
1208 	default:
1209 		return (EINVAL);
1210 	}
1211 
1212 #ifdef INET6
1213 	/*
1214 	 * A link-local scope inet6 address must not be configured on both
1215 	 * the parent interface and a member interface at the same time.
1216 	 * This restriction is needed to prevent violation of the
1217 	 * link-local scope zone.  Attempting to add a member interface
1218 	 * that has inet6 addresses while the parent already has inet6
1219 	 * configured triggers removal of all inet6 addresses on the
1220 	 * member interface.
1221 	 */
1222 
1223 	/* Check if the parent interface has a link-local scope addr. */
1224 	if (V_allow_llz_overlap == 0 &&
1225 	    in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1226 		/*
1227 		 * If any, remove all inet6 addresses from the member
1228 		 * interfaces.
1229 		 */
1230 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1231 			if (in6ifa_llaonifp(bif->bif_ifp)) {
1232 				in6_ifdetach(bif->bif_ifp);
1233 				if_printf(sc->sc_ifp,
1234 				    "IPv6 addresses on %s have been removed "
1235 				    "before adding it as a member to prevent "
1236 				    "IPv6 address scope violation.\n",
1237 				    bif->bif_ifp->if_xname);
1238 			}
1239 		}
1240 		if (in6ifa_llaonifp(ifs)) {
1241 			in6_ifdetach(ifs);
1242 			if_printf(sc->sc_ifp,
1243 			    "IPv6 addresses on %s have been removed "
1244 			    "before adding it as a member to prevent "
1245 			    "IPv6 address scope violation.\n",
1246 			    ifs->if_xname);
1247 		}
1248 	}
1249 #endif
1250 	/* Allow the first Ethernet member to define the MTU */
1251 	if (CK_LIST_EMPTY(&sc->sc_iflist))
1252 		sc->sc_ifp->if_mtu = ifs->if_mtu;
1253 	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1254 		if_printf(sc->sc_ifp, "invalid MTU: %u(%s) != %u\n",
1255 		    ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu);
1256 		return (EINVAL);
1257 	}
1258 
1259 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1260 	if (bif == NULL)
1261 		return (ENOMEM);
1262 
1263 	bif->bif_ifp = ifs;
1264 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1265 	bif->bif_savedcaps = ifs->if_capenable;
1266 
1267 	/*
1268 	 * Assign the interface's MAC address to the bridge if it's the first
1269 	 * member and the MAC address of the bridge has not been changed from
1270 	 * the default randomly generated one.
1271 	 */
1272 	if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1273 	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1274 		bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1275 		sc->sc_ifaddr = ifs;
1276 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1277 	}
1278 
1279 	ifs->if_bridge = sc;
1280 	ifs->if_bridge_output = bridge_output;
1281 	ifs->if_bridge_input = bridge_input;
1282 	ifs->if_bridge_linkstate = bridge_linkstate;
1283 	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1284 	/*
1285 	 * XXX: XLOCK HERE!?!
1286 	 *
1287 	 * NOTE: insert_***HEAD*** should be safe for the traversals.
1288 	 */
1289 	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1290 
1291 	/* Set interface capabilities to the intersection set of all members */
1292 	bridge_mutecaps(sc);
1293 	bridge_linkcheck(sc);
1294 
1295 	/* Place the interface into promiscuous mode */
1296 	switch (ifs->if_type) {
1297 		case IFT_ETHER:
1298 		case IFT_L2VLAN:
1299 			error = ifpromisc(ifs, 1);
1300 			break;
1301 	}
1302 
1303 	if (error)
1304 		bridge_delete_member(sc, bif, 0);
1305 	return (error);
1306 }
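/*
 * Editorial note (not in the original source): this handler is the
 * first entry of bridge_control_table above, so it is what an
 * "ifconfig bridgeX addm <member>" request typically reaches via the
 * SIOCSDRVSPEC path in bridge_ioctl().
 */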
1307 
1308 static int
1309 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1310 {
1311 	struct ifbreq *req = arg;
1312 	struct bridge_iflist *bif;
1313 
1314 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1315 	if (bif == NULL)
1316 		return (ENOENT);
1317 
1318 	bridge_delete_member(sc, bif, 0);
1319 
1320 	return (0);
1321 }
1322 
1323 static int
1324 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1325 {
1326 	struct ifbreq *req = arg;
1327 	struct bridge_iflist *bif;
1328 	struct bstp_port *bp;
1329 
1330 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1331 	if (bif == NULL)
1332 		return (ENOENT);
1333 
1334 	bp = &bif->bif_stp;
1335 	req->ifbr_ifsflags = bif->bif_flags;
1336 	req->ifbr_state = bp->bp_state;
1337 	req->ifbr_priority = bp->bp_priority;
1338 	req->ifbr_path_cost = bp->bp_path_cost;
1339 	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1340 	req->ifbr_proto = bp->bp_protover;
1341 	req->ifbr_role = bp->bp_role;
1342 	req->ifbr_stpflags = bp->bp_flags;
1343 	req->ifbr_addrcnt = bif->bif_addrcnt;
1344 	req->ifbr_addrmax = bif->bif_addrmax;
1345 	req->ifbr_addrexceeded = bif->bif_addrexceeded;
1346 
1347 	/* Copy STP state options as flags */
1348 	if (bp->bp_operedge)
1349 		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1350 	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1351 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1352 	if (bp->bp_ptp_link)
1353 		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1354 	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1355 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1356 	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1357 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1358 	if (bp->bp_flags & BSTP_PORT_ADMCOST)
1359 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1360 	return (0);
1361 }
1362 
1363 static int
1364 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1365 {
1366 	struct epoch_tracker et;
1367 	struct ifbreq *req = arg;
1368 	struct bridge_iflist *bif;
1369 	struct bstp_port *bp;
1370 	int error;
1371 
1372 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1373 	if (bif == NULL)
1374 		return (ENOENT);
1375 	bp = &bif->bif_stp;
1376 
1377 	if (req->ifbr_ifsflags & IFBIF_SPAN)
1378 		/* SPAN is read-only */
1379 		return (EINVAL);
1380 
1381 	NET_EPOCH_ENTER(et);
1382 
1383 	if (req->ifbr_ifsflags & IFBIF_STP) {
1384 		if ((bif->bif_flags & IFBIF_STP) == 0) {
1385 			error = bstp_enable(&bif->bif_stp);
1386 			if (error) {
1387 				NET_EPOCH_EXIT(et);
1388 				return (error);
1389 			}
1390 		}
1391 	} else {
1392 		if ((bif->bif_flags & IFBIF_STP) != 0)
1393 			bstp_disable(&bif->bif_stp);
1394 	}
1395 
1396 	/* Pass on STP flags */
1397 	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1398 	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1399 	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1400 	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1401 
1402 	/* Save the bits relating to the bridge */
1403 	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1404 
1405 	NET_EPOCH_EXIT(et);
1406 
1407 	return (0);
1408 }
1409 
1410 static int
1411 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1412 {
1413 	struct ifbrparam *param = arg;
1414 
1415 	sc->sc_brtmax = param->ifbrp_csize;
1416 	bridge_rttrim(sc);
1417 
1418 	return (0);
1419 }
1420 
1421 static int
1422 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1423 {
1424 	struct ifbrparam *param = arg;
1425 
1426 	param->ifbrp_csize = sc->sc_brtmax;
1427 
1428 	return (0);
1429 }
1430 
1431 static int
1432 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1433 {
1434 	struct ifbifconf *bifc = arg;
1435 	struct bridge_iflist *bif;
1436 	struct ifbreq breq;
1437 	char *buf, *outbuf;
1438 	int count, buflen, len, error = 0;
1439 
1440 	count = 0;
1441 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1442 		count++;
1443 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1444 		count++;
1445 
1446 	buflen = sizeof(breq) * count;
1447 	if (bifc->ifbic_len == 0) {
1448 		bifc->ifbic_len = buflen;
1449 		return (0);
1450 	}
1451 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1452 	if (outbuf == NULL)
1453 		return (ENOMEM);
1454 
1455 	count = 0;
1456 	buf = outbuf;
1457 	len = min(bifc->ifbic_len, buflen);
1458 	bzero(&breq, sizeof(breq));
1459 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1460 		if (len < sizeof(breq))
1461 			break;
1462 
1463 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1464 		    sizeof(breq.ifbr_ifsname));
1465 		/* Fill in the ifbreq structure */
1466 		error = bridge_ioctl_gifflags(sc, &breq);
1467 		if (error)
1468 			break;
1469 		memcpy(buf, &breq, sizeof(breq));
1470 		count++;
1471 		buf += sizeof(breq);
1472 		len -= sizeof(breq);
1473 	}
1474 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1475 		if (len < sizeof(breq))
1476 			break;
1477 
1478 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1479 		    sizeof(breq.ifbr_ifsname));
1480 		breq.ifbr_ifsflags = bif->bif_flags;
1481 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1482 		memcpy(buf, &breq, sizeof(breq));
1483 		count++;
1484 		buf += sizeof(breq);
1485 		len -= sizeof(breq);
1486 	}
1487 
1488 	bifc->ifbic_len = sizeof(breq) * count;
1489 	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1490 	free(outbuf, M_TEMP);
1491 	return (error);
1492 }
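/*
 * Editorial note (not in the original source): callers typically issue
 * this request twice -- once with ifbic_len set to 0 to learn the
 * required buffer size (returned above), then again with a buffer of at
 * least that size to receive the per-member ifbreq records.
 */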
1493 
1494 static int
1495 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1496 {
1497 	struct ifbaconf *bac = arg;
1498 	struct bridge_rtnode *brt;
1499 	struct ifbareq bareq;
1500 	char *buf, *outbuf;
1501 	int count, buflen, len, error = 0;
1502 
1503 	if (bac->ifbac_len == 0)
1504 		return (0);
1505 
1506 	count = 0;
1507 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1508 		count++;
1509 	buflen = sizeof(bareq) * count;
1510 
1511 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1512 	if (outbuf == NULL)
1513 		return (ENOMEM);
1514 
1515 	count = 0;
1516 	buf = outbuf;
1517 	len = min(bac->ifbac_len, buflen);
1518 	bzero(&bareq, sizeof(bareq));
1519 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1520 		if (len < sizeof(bareq))
1521 			goto out;
1522 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1523 		    sizeof(bareq.ifba_ifsname));
1524 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1525 		bareq.ifba_vlan = brt->brt_vlan;
1526 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1527 				time_uptime < brt->brt_expire)
1528 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1529 		else
1530 			bareq.ifba_expire = 0;
1531 		bareq.ifba_flags = brt->brt_flags;
1532 
1533 		memcpy(buf, &bareq, sizeof(bareq));
1534 		count++;
1535 		buf += sizeof(bareq);
1536 		len -= sizeof(bareq);
1537 	}
1538 out:
1539 	bac->ifbac_len = sizeof(bareq) * count;
1540 	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1541 	free(outbuf, M_TEMP);
1542 	return (error);
1543 }
1544 
1545 static int
1546 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1547 {
1548 	struct ifbareq *req = arg;
1549 	struct bridge_iflist *bif;
1550 	struct epoch_tracker et;
1551 	int error;
1552 
1553 	NET_EPOCH_ENTER(et);
1554 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1555 	if (bif == NULL) {
1556 		NET_EPOCH_EXIT(et);
1557 		return (ENOENT);
1558 	}
1559 
1560 	/* bridge_rtupdate() may acquire the lock. */
1561 	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1562 	    req->ifba_flags);
1563 	NET_EPOCH_EXIT(et);
1564 
1565 	return (error);
1566 }
1567 
1568 static int
1569 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1570 {
1571 	struct ifbrparam *param = arg;
1572 
1573 	sc->sc_brttimeout = param->ifbrp_ctime;
1574 	return (0);
1575 }
1576 
1577 static int
1578 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1579 {
1580 	struct ifbrparam *param = arg;
1581 
1582 	param->ifbrp_ctime = sc->sc_brttimeout;
1583 	return (0);
1584 }
1585 
1586 static int
1587 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1588 {
1589 	struct ifbareq *req = arg;
1590 
1591 	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
1592 }
1593 
1594 static int
1595 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1596 {
1597 	struct ifbreq *req = arg;
1598 
1599 	BRIDGE_RT_LOCK(sc);
1600 	bridge_rtflush(sc, req->ifbr_ifsflags);
1601 	BRIDGE_RT_UNLOCK(sc);
1602 
1603 	return (0);
1604 }
1605 
1606 static int
1607 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1608 {
1609 	struct ifbrparam *param = arg;
1610 	struct bstp_state *bs = &sc->sc_stp;
1611 
1612 	param->ifbrp_prio = bs->bs_bridge_priority;
1613 	return (0);
1614 }
1615 
1616 static int
1617 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1618 {
1619 	struct ifbrparam *param = arg;
1620 
1621 	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1622 }
1623 
1624 static int
1625 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1626 {
1627 	struct ifbrparam *param = arg;
1628 	struct bstp_state *bs = &sc->sc_stp;
1629 
1630 	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1631 	return (0);
1632 }
1633 
1634 static int
1635 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1636 {
1637 	struct ifbrparam *param = arg;
1638 
1639 	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1640 }
1641 
1642 static int
1643 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1644 {
1645 	struct ifbrparam *param = arg;
1646 	struct bstp_state *bs = &sc->sc_stp;
1647 
1648 	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1649 	return (0);
1650 }
1651 
1652 static int
1653 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1654 {
1655 	struct ifbrparam *param = arg;
1656 
1657 	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1658 }
1659 
1660 static int
1661 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1662 {
1663 	struct ifbrparam *param = arg;
1664 	struct bstp_state *bs = &sc->sc_stp;
1665 
1666 	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1667 	return (0);
1668 }
1669 
1670 static int
1671 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1672 {
1673 	struct ifbrparam *param = arg;
1674 
1675 	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1676 }
1677 
1678 static int
1679 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1680 {
1681 	struct ifbreq *req = arg;
1682 	struct bridge_iflist *bif;
1683 
1684 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1685 	if (bif == NULL)
1686 		return (ENOENT);
1687 
1688 	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1689 }
1690 
1691 static int
1692 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1693 {
1694 	struct ifbreq *req = arg;
1695 	struct bridge_iflist *bif;
1696 
1697 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1698 	if (bif == NULL)
1699 		return (ENOENT);
1700 
1701 	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1702 }
1703 
1704 static int
1705 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1706 {
1707 	struct ifbreq *req = arg;
1708 	struct bridge_iflist *bif;
1709 
1710 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1711 	if (bif == NULL)
1712 		return (ENOENT);
1713 
1714 	bif->bif_addrmax = req->ifbr_addrmax;
1715 	return (0);
1716 }
1717 
1718 static int
1719 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1720 {
1721 	struct ifbreq *req = arg;
1722 	struct bridge_iflist *bif = NULL;
1723 	struct ifnet *ifs;
1724 
1725 	ifs = ifunit(req->ifbr_ifsname);
1726 	if (ifs == NULL)
1727 		return (ENOENT);
1728 
1729 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1730 		if (ifs == bif->bif_ifp)
1731 			return (EBUSY);
1732 
1733 	if (ifs->if_bridge != NULL)
1734 		return (EBUSY);
1735 
1736 	switch (ifs->if_type) {
1737 		case IFT_ETHER:
1738 		case IFT_GIF:
1739 		case IFT_L2VLAN:
1740 			break;
1741 		default:
1742 			return (EINVAL);
1743 	}
1744 
1745 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1746 	if (bif == NULL)
1747 		return (ENOMEM);
1748 
1749 	bif->bif_ifp = ifs;
1750 	bif->bif_flags = IFBIF_SPAN;
1751 
1752 	CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1753 
1754 	return (0);
1755 }
1756 
1757 static int
1758 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1759 {
1760 	struct ifbreq *req = arg;
1761 	struct bridge_iflist *bif;
1762 	struct ifnet *ifs;
1763 
1764 	ifs = ifunit(req->ifbr_ifsname);
1765 	if (ifs == NULL)
1766 		return (ENOENT);
1767 
1768 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1769 		if (ifs == bif->bif_ifp)
1770 			break;
1771 
1772 	if (bif == NULL)
1773 		return (ENOENT);
1774 
1775 	bridge_delete_span(sc, bif);
1776 
1777 	return (0);
1778 }
1779 
1780 static int
1781 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1782 {
1783 	struct ifbropreq *req = arg;
1784 	struct bstp_state *bs = &sc->sc_stp;
1785 	struct bstp_port *root_port;
1786 
1787 	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1788 	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1789 	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1790 
1791 	root_port = bs->bs_root_port;
1792 	if (root_port == NULL)
1793 		req->ifbop_root_port = 0;
1794 	else
1795 		req->ifbop_root_port = root_port->bp_ifp->if_index;
1796 
1797 	req->ifbop_holdcount = bs->bs_txholdcount;
1798 	req->ifbop_priority = bs->bs_bridge_priority;
1799 	req->ifbop_protocol = bs->bs_protover;
1800 	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1801 	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1802 	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1803 	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1804 	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1805 	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1806 
1807 	return (0);
1808 }
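
/*
 * Illustrative only: bridge_ioctl_gbparam() is reached from userland through
 * the generic SIOCGDRVSPEC ioctl carrying a struct ifbropreq, which is the
 * plumbing ifconfig(8) uses to display these values.  The sketch below is a
 * hedged example of that path and is not part of this file; the BRDGPARAM
 * command code is assumed to come from net/if_bridgevar.h.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_bridgevar.h>
#include <stdio.h>
#include <string.h>

/* Query and print the STP parameters of the named bridge. */
static int
show_bridge_params(int s, const char *bridge)
{
	struct ifbropreq req;
	struct ifdrv ifd;

	memset(&req, 0, sizeof(req));
	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, bridge, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = BRDGPARAM;	/* assumed; ends up in bridge_ioctl_gbparam() */
	ifd.ifd_len = sizeof(req);
	ifd.ifd_data = &req;

	if (ioctl(s, SIOCGDRVSPEC, &ifd) == -1)
		return (-1);

	printf("maxage %u hellotime %u fwddelay %u priority %u root port %u\n",
	    (unsigned)req.ifbop_maxage, (unsigned)req.ifbop_hellotime,
	    (unsigned)req.ifbop_fwddelay, (unsigned)req.ifbop_priority,
	    (unsigned)req.ifbop_root_port);
	return (0);
}
#endif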
1809 
1810 static int
1811 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1812 {
1813 	struct ifbrparam *param = arg;
1814 
1815 	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1816 	return (0);
1817 }
1818 
1819 static int
1820 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1821 {
1822 	struct ifbpstpconf *bifstp = arg;
1823 	struct bridge_iflist *bif;
1824 	struct bstp_port *bp;
1825 	struct ifbpstpreq bpreq;
1826 	char *buf, *outbuf;
1827 	int count, buflen, len, error = 0;
1828 
1829 	count = 0;
1830 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1831 		if ((bif->bif_flags & IFBIF_STP) != 0)
1832 			count++;
1833 	}
1834 
1835 	buflen = sizeof(bpreq) * count;
1836 	if (bifstp->ifbpstp_len == 0) {
1837 		bifstp->ifbpstp_len = buflen;
1838 		return (0);
1839 	}
1840 
1841 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1842 	if (outbuf == NULL)
1843 		return (ENOMEM);
1844 
1845 	count = 0;
1846 	buf = outbuf;
1847 	len = min(bifstp->ifbpstp_len, buflen);
1848 	bzero(&bpreq, sizeof(bpreq));
1849 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1850 		if (len < sizeof(bpreq))
1851 			break;
1852 
1853 		if ((bif->bif_flags & IFBIF_STP) == 0)
1854 			continue;
1855 
1856 		bp = &bif->bif_stp;
1857 		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1858 		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1859 		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1860 		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1861 		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1862 		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1863 
1864 		memcpy(buf, &bpreq, sizeof(bpreq));
1865 		count++;
1866 		buf += sizeof(bpreq);
1867 		len -= sizeof(bpreq);
1868 	}
1869 
1870 	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1871 	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1872 	free(outbuf, M_TEMP);
1873 	return (error);
1874 }
1875 
1876 static int
1877 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
1878 {
1879 	struct ifbrparam *param = arg;
1880 
1881 	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
1882 }
1883 
1884 static int
1885 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
1886 {
1887 	struct ifbrparam *param = arg;
1888 
1889 	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
1890 }
1891 
1892 /*
1893  * bridge_ifdetach:
1894  *
1895  *	Detach an interface from a bridge.  Called when a member
1896  *	interface is detaching.
1897  */
1898 static void
1899 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1900 {
1901 	struct bridge_softc *sc = ifp->if_bridge;
1902 	struct bridge_iflist *bif;
1903 
1904 	if (ifp->if_flags & IFF_RENAMING)
1905 		return;
1906 	if (V_bridge_cloner == NULL) {
1907 		/*
1908 		 * This detach handler can be called after
1909 		 * vnet_bridge_uninit().  Just return in that case.
1910 		 */
1911 		return;
1912 	}
1913 	/* Check if the interface is a bridge member */
1914 	if (sc != NULL) {
1915 		BRIDGE_LOCK(sc);
1916 
1917 		bif = bridge_lookup_member_if(sc, ifp);
1918 		if (bif != NULL)
1919 			bridge_delete_member(sc, bif, 1);
1920 
1921 		BRIDGE_UNLOCK(sc);
1922 		return;
1923 	}
1924 
1925 	/* Check if the interface is a span port */
1926 	BRIDGE_LIST_LOCK();
1927 	LIST_FOREACH(sc, &V_bridge_list, sc_list) {
1928 		BRIDGE_LOCK(sc);
1929 		CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1930 			if (ifp == bif->bif_ifp) {
1931 				bridge_delete_span(sc, bif);
1932 				break;
1933 			}
1934 
1935 		BRIDGE_UNLOCK(sc);
1936 	}
1937 	BRIDGE_LIST_UNLOCK();
1938 }
1939 
1940 /*
1941  * bridge_init:
1942  *
1943  *	Initialize a bridge interface.
1944  */
1945 static void
1946 bridge_init(void *xsc)
1947 {
1948 	struct bridge_softc *sc = (struct bridge_softc *)xsc;
1949 	struct ifnet *ifp = sc->sc_ifp;
1950 
1951 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1952 		return;
1953 
1954 	BRIDGE_LOCK(sc);
1955 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1956 	    bridge_timer, sc);
1957 
1958 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1959 	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
1960 
1961 	BRIDGE_UNLOCK(sc);
1962 }
1963 
1964 /*
1965  * bridge_stop:
1966  *
1967  *	Stop the bridge interface.
1968  */
1969 static void
1970 bridge_stop(struct ifnet *ifp, int disable)
1971 {
1972 	struct bridge_softc *sc = ifp->if_softc;
1973 
1974 	BRIDGE_LOCK_ASSERT(sc);
1975 
1976 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1977 		return;
1978 
1979 	BRIDGE_RT_LOCK(sc);
1980 	callout_stop(&sc->sc_brcallout);
1981 
1982 	bstp_stop(&sc->sc_stp);
1983 
1984 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1985 	BRIDGE_RT_UNLOCK(sc);
1986 
1987 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1988 }
1989 
1990 /*
1991  * bridge_enqueue:
1992  *
1993  *	Enqueue a packet on a bridge member interface.
1994  *
1995  */
1996 static int
1997 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
1998 {
1999 	int len, err = 0;
2000 	short mflags;
2001 	struct mbuf *m0;
2002 
2003 	/* We may be sending a fragment so traverse the mbuf chain */
2004 	for (; m; m = m0) {
2005 		m0 = m->m_nextpkt;
2006 		m->m_nextpkt = NULL;
2007 		len = m->m_pkthdr.len;
2008 		mflags = m->m_flags;
2009 
2010 		/*
2011 		 * If the underlying interface cannot do VLAN tag insertion
2012 		 * itself, then attach a packet tag that holds it.
2013 		 */
2014 		if ((m->m_flags & M_VLANTAG) &&
2015 		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
2016 			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2017 			if (m == NULL) {
2018 				if_printf(dst_ifp,
2019 				    "unable to prepend VLAN header\n");
2020 				if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
2021 				continue;
2022 			}
2023 			m->m_flags &= ~M_VLANTAG;
2024 		}
2025 
2026 		M_ASSERTPKTHDR(m); /* We shouldn't transmit an mbuf without a pkthdr */
2027 		if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
2028 			m_freem(m0);
2029 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2030 			break;
2031 		}
2032 
2033 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
2034 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
2035 		if (mflags & M_MCAST)
2036 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
2037 	}
2038 
2039 	return (err);
2040 }
2041 
2042 /*
2043  * bridge_dummynet:
2044  *
2045  * 	Receive a queued packet from dummynet and pass it on to the output
2046  * 	interface.
2047  *
2048  *	The mbuf has the Ethernet header already attached.
2049  */
2050 static void
2051 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2052 {
2053 	struct bridge_softc *sc;
2054 
2055 	sc = ifp->if_bridge;
2056 
2057 	/*
2058 	 * The packet didn't originate from a member interface. This should only
2059 	 * ever happen if a member interface is removed while packets are
2060 	 * queued for it.
2061 	 */
2062 	if (sc == NULL) {
2063 		m_freem(m);
2064 		return;
2065 	}
2066 
2067 	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2068 #ifdef INET6
2069 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2070 #endif
2071 	    ) {
2072 		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2073 			return;
2074 		if (m == NULL)
2075 			return;
2076 	}
2077 
2078 	bridge_enqueue(sc, ifp, m);
2079 }
2080 
2081 /*
2082  * bridge_output:
2083  *
2084  *	Send output from a bridge member interface.  This
2085  *	performs the bridging function for locally originated
2086  *	packets.
2087  *
2088  *	The mbuf has the Ethernet header already attached.  We must
2089  *	enqueue or free the mbuf before returning.
2090  */
2091 static int
2092 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2093     struct rtentry *rt)
2094 {
2095 	struct ether_header *eh;
2096 	struct ifnet *bifp, *dst_if;
2097 	struct bridge_softc *sc;
2098 	uint16_t vlan;
2099 
2100 	NET_EPOCH_ASSERT();
2101 
2102 	if (m->m_len < ETHER_HDR_LEN) {
2103 		m = m_pullup(m, ETHER_HDR_LEN);
2104 		if (m == NULL)
2105 			return (0);
2106 	}
2107 
2108 	eh = mtod(m, struct ether_header *);
2109 	sc = ifp->if_bridge;
2110 	vlan = VLANTAGOF(m);
2111 
2112 	bifp = sc->sc_ifp;
2113 
2114 	/*
2115 	 * If bridge is down, but the original output interface is up,
2116 	 * go ahead and send out that interface.  Otherwise, the packet
2117 	 * is dropped below.
2118 	 */
2119 	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2120 		dst_if = ifp;
2121 		goto sendunicast;
2122 	}
2123 
2124 	/*
2125 	 * If the packet is a multicast, or we don't know a better way to
2126 	 * get there, send to all interfaces.
2127 	 */
2128 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
2129 		dst_if = NULL;
2130 	else
2131 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2132 	/* Tap any traffic not passing back out the originating interface */
2133 	if (dst_if != ifp)
2134 		ETHER_BPF_MTAP(bifp, m);
2135 	if (dst_if == NULL) {
2136 		struct bridge_iflist *bif;
2137 		struct mbuf *mc;
2138 		int used = 0;
2139 
2140 		bridge_span(sc, m);
2141 
2142 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2143 			dst_if = bif->bif_ifp;
2144 
2145 			if (dst_if->if_type == IFT_GIF)
2146 				continue;
2147 			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2148 				continue;
2149 
2150 			/*
2151 			 * If this is not the original output interface,
2152 			 * and the interface is participating in spanning
2153 			 * tree, make sure the port is in a state that
2154 			 * allows forwarding.
2155 			 */
2156 			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2157 			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2158 				continue;
2159 
2160 			if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2161 				used = 1;
2162 				mc = m;
2163 			} else {
2164 				mc = m_copypacket(m, M_NOWAIT);
2165 				if (mc == NULL) {
2166 					if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2167 					continue;
2168 				}
2169 			}
2170 
2171 			bridge_enqueue(sc, dst_if, mc);
2172 		}
2173 		if (used == 0)
2174 			m_freem(m);
2175 		return (0);
2176 	}
2177 
2178 sendunicast:
2179 	/*
2180 	 * XXX Spanning tree consideration here?
2181 	 */
2182 
2183 	bridge_span(sc, m);
2184 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2185 		m_freem(m);
2186 		return (0);
2187 	}
2188 
2189 	bridge_enqueue(sc, dst_if, m);
2190 	return (0);
2191 }
2192 
2193 /*
2194  * bridge_transmit:
2195  *
2196  *	Do output on a bridge.
2197  *
2198  */
2199 static int
2200 bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2201 {
2202 	struct bridge_softc *sc;
2203 	struct ether_header *eh;
2204 	struct ifnet *dst_if;
2205 	int error = 0;
2206 
2207 	sc = ifp->if_softc;
2208 
2209 	ETHER_BPF_MTAP(ifp, m);
2210 
2211 	eh = mtod(m, struct ether_header *);
2212 
2213 	if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2214 	    (dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1)) != NULL) {
2215 		error = bridge_enqueue(sc, dst_if, m);
2216 	} else
2217 		bridge_broadcast(sc, ifp, m, 0);
2218 
2219 	return (error);
2220 }
2221 
2222 /*
2223  * The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2224  */
2225 static void
2226 bridge_qflush(struct ifnet *ifp __unused)
2227 {
2228 }
2229 
2230 /*
2231  * bridge_forward:
2232  *
2233  *	The forwarding function of the bridge.
2234  *
2235  *	NOTE: Releases the lock on return.
2236  */
2237 static void
2238 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2239     struct mbuf *m)
2240 {
2241 	struct bridge_iflist *dbif;
2242 	struct ifnet *src_if, *dst_if, *ifp;
2243 	struct ether_header *eh;
2244 	uint16_t vlan;
2245 	uint8_t *dst;
2246 	int error;
2247 
2248 	NET_EPOCH_ASSERT();
2249 
2250 	src_if = m->m_pkthdr.rcvif;
2251 	ifp = sc->sc_ifp;
2252 
2253 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2254 	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2255 	vlan = VLANTAGOF(m);
2256 
2257 	if ((sbif->bif_flags & IFBIF_STP) &&
2258 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2259 		goto drop;
2260 
2261 	eh = mtod(m, struct ether_header *);
2262 	dst = eh->ether_dhost;
2263 
2264 	/* If the interface is learning, record the address. */
2265 	if (sbif->bif_flags & IFBIF_LEARNING) {
2266 		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2267 		    sbif, 0, IFBAF_DYNAMIC);
2268 		/*
2269 		 * If the interface has addresses limits then deny any source
2270 		 * that is not in the cache.
2271 		 */
2272 		if (error && sbif->bif_addrmax)
2273 			goto drop;
2274 	}
2275 
2276 	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2277 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2278 		goto drop;
2279 
2280 	/*
2281 	 * At this point, the port either doesn't participate
2282 	 * in spanning tree or it is in the forwarding state.
2283 	 */
2284 
2285 	/*
2286 	 * If the packet is unicast, destined for someone on
2287 	 * "this" side of the bridge, drop it.
2288 	 */
2289 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2290 		dst_if = bridge_rtlookup(sc, dst, vlan);
2291 		if (src_if == dst_if)
2292 			goto drop;
2293 	} else {
2294 		/*
2295 		 * Check if it's a reserved multicast address; any address
2296 		 * listed in 802.1D section 7.12.6 may not be forwarded by the
2297 		 * bridge.
2298 		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
2299 		 */
2300 		if (dst[0] == 0x01 && dst[1] == 0x80 &&
2301 		    dst[2] == 0xc2 && dst[3] == 0x00 &&
2302 		    dst[4] == 0x00 && dst[5] <= 0x0f)
2303 			goto drop;
2304 
2305 		/* ...forward it to all interfaces. */
2306 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2307 		dst_if = NULL;
2308 	}
2309 
2310 	/*
2311 	 * If we have a destination interface which is a member of our bridge,
2312 	 * OR this is a unicast packet, push it through the bpf(4) machinery.
2313 	 * For broadcast or multicast packets, don't bother because it will
2314 	 * be reinjected into ether_input. We do this before we pass the packets
2315 	 * through the pfil(9) framework, as it is possible that pfil(9) will
2316 	 * drop the packet, or possibly modify it, making it difficult to debug
2317 	 * firewall issues on the bridge.
2318 	 */
2319 	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2320 		ETHER_BPF_MTAP(ifp, m);
2321 
2322 	/* run the packet filter */
2323 	if (PFIL_HOOKED_IN(V_inet_pfil_head)
2324 #ifdef INET6
2325 	    || PFIL_HOOKED_IN(V_inet6_pfil_head)
2326 #endif
2327 	    ) {
2328 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2329 			return;
2330 		if (m == NULL)
2331 			return;
2332 	}
2333 
2334 	if (dst_if == NULL) {
2335 		bridge_broadcast(sc, src_if, m, 1);
2336 		return;
2337 	}
2338 
2339 	/*
2340 	 * At this point, we're dealing with a unicast frame
2341 	 * going to a different interface.
2342 	 */
2343 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2344 		goto drop;
2345 
2346 	dbif = bridge_lookup_member_if(sc, dst_if);
2347 	if (dbif == NULL)
2348 		/* Not a member of the bridge (anymore?) */
2349 		goto drop;
2350 
2351 	/* Private segments cannot talk to each other */
2352 	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2353 		goto drop;
2354 
2355 	if ((dbif->bif_flags & IFBIF_STP) &&
2356 	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2357 		goto drop;
2358 
2359 	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2360 #ifdef INET6
2361 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2362 #endif
2363 	    ) {
2364 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2365 			return;
2366 		if (m == NULL)
2367 			return;
2368 	}
2369 
2370 	bridge_enqueue(sc, dst_if, m);
2371 	return;
2372 
2373 drop:
2374 	m_freem(m);
2375 }
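
/*
 * The reserved-group test in bridge_forward() above (01-80-C2-00-00-00
 * through 01-80-C2-00-00-0F, 802.1D section 7.12.6) can be written as a
 * small stand-alone predicate.  Illustrative sketch only; not used by
 * this file.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* True when 'dst' is an 802.1D reserved group address a bridge must not forward. */
static bool
ether_is_8021d_reserved(const uint8_t dst[6])
{
	return (dst[0] == 0x01 && dst[1] == 0x80 && dst[2] == 0xc2 &&
	    dst[3] == 0x00 && dst[4] == 0x00 && dst[5] <= 0x0f);
}
#endif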
2376 
2377 /*
2378  * bridge_input:
2379  *
2380  *	Receive input from a member interface.  Queue the packet for
2381  *	bridging if it is not for us.
2382  */
2383 static struct mbuf *
2384 bridge_input(struct ifnet *ifp, struct mbuf *m)
2385 {
2386 	struct bridge_softc *sc = ifp->if_bridge;
2387 	struct bridge_iflist *bif, *bif2;
2388 	struct ifnet *bifp;
2389 	struct ether_header *eh;
2390 	struct mbuf *mc, *mc2;
2391 	uint16_t vlan;
2392 	int error;
2393 
2394 	NET_EPOCH_ASSERT();
2395 
2396 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2397 		return (m);
2398 
2399 	bifp = sc->sc_ifp;
2400 	vlan = VLANTAGOF(m);
2401 
2402 	/*
2403 	 * Implement support for bridge monitoring. If this flag has been
2404 	 * set on this interface, discard the packet once we push it through
2405 	 * the bpf(4) machinery, but before we do, increment the byte and
2406 	 * packet counters associated with this interface.
2407 	 */
2408 	if ((bifp->if_flags & IFF_MONITOR) != 0) {
2409 		m->m_pkthdr.rcvif  = bifp;
2410 		ETHER_BPF_MTAP(bifp, m);
2411 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2412 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2413 		m_freem(m);
2414 		return (NULL);
2415 	}
2416 	bif = bridge_lookup_member_if(sc, ifp);
2417 	if (bif == NULL) {
2418 		return (m);
2419 	}
2420 
2421 	eh = mtod(m, struct ether_header *);
2422 
2423 	bridge_span(sc, m);
2424 
2425 	if (m->m_flags & (M_BCAST|M_MCAST)) {
2426 		/* Tap off 802.1D packets; they do not get forwarded. */
2427 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2428 		    ETHER_ADDR_LEN) == 0) {
2429 			bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2430 			return (NULL);
2431 		}
2432 
2433 		if ((bif->bif_flags & IFBIF_STP) &&
2434 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2435 			return (m);
2436 		}
2437 
2438 		/*
2439 		 * Make a deep copy of the packet and enqueue the copy
2440 		 * for bridge processing; return the original packet for
2441 		 * local processing.
2442 		 */
2443 		mc = m_dup(m, M_NOWAIT);
2444 		if (mc == NULL) {
2445 			return (m);
2446 		}
2447 
2448 		/* Perform the bridge forwarding function with the copy. */
2449 		bridge_forward(sc, bif, mc);
2450 
2451 		/*
2452 		 * Reinject the mbuf as arriving on the bridge so we have a
2453 		 * chance at claiming multicast packets. We cannot loop back
2454 		 * here from ether_input as a bridge is never a member of a
2455 		 * bridge.
2456 		 */
2457 		KASSERT(bifp->if_bridge == NULL,
2458 		    ("loop created in bridge_input"));
2459 		mc2 = m_dup(m, M_NOWAIT);
2460 		if (mc2 != NULL) {
2461 			/* Keep the layer3 header aligned */
2462 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2463 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2464 		}
2465 		if (mc2 != NULL) {
2466 			mc2->m_pkthdr.rcvif = bifp;
2467 			(*bifp->if_input)(bifp, mc2);
2468 		}
2469 
2470 		/* Return the original packet for local processing. */
2471 		return (m);
2472 	}
2473 
2474 	if ((bif->bif_flags & IFBIF_STP) &&
2475 	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2476 		return (m);
2477 	}
2478 
2479 #if (defined(INET) || defined(INET6))
2480 #   define OR_CARP_CHECK_WE_ARE_DST(iface) \
2481 	|| ((iface)->if_carp \
2482 	    && (*carp_forus_p)((iface), eh->ether_dhost))
2483 #   define OR_CARP_CHECK_WE_ARE_SRC(iface) \
2484 	|| ((iface)->if_carp \
2485 	    && (*carp_forus_p)((iface), eh->ether_shost))
2486 #else
2487 #   define OR_CARP_CHECK_WE_ARE_DST(iface)
2488 #   define OR_CARP_CHECK_WE_ARE_SRC(iface)
2489 #endif
2490 
2491 #ifdef INET6
2492 #   define OR_PFIL_HOOKED_INET6 \
2493 	|| PFIL_HOOKED_IN(V_inet6_pfil_head)
2494 #else
2495 #   define OR_PFIL_HOOKED_INET6
2496 #endif
2497 
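	/*
	 * GRAB_OUR_PACKETS(iface): if the frame is addressed to 'iface'
	 * (or to a carp(4) address on it), learn the source address when
	 * the member is learning, account the packet to the bridge and
	 * return the mbuf so ether_input() continues local processing;
	 * if instead we see our own source address come back at us, free
	 * the mbuf to break the loop.
	 */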
2498 #define GRAB_OUR_PACKETS(iface) \
2499 	if ((iface)->if_type == IFT_GIF) \
2500 		continue; \
2501 	/* It is destined for us. */ \
2502 	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost,  ETHER_ADDR_LEN) == 0 \
2503 	    OR_CARP_CHECK_WE_ARE_DST((iface))				\
2504 	    ) {								\
2505 		if (bif->bif_flags & IFBIF_LEARNING) {			\
2506 			error = bridge_rtupdate(sc, eh->ether_shost,	\
2507 			    vlan, bif, 0, IFBAF_DYNAMIC);		\
2508 			if (error && bif->bif_addrmax) {		\
2509 				m_freem(m);				\
2510 				return (NULL);				\
2511 			}						\
2512 		}							\
2513 		m->m_pkthdr.rcvif = iface;				\
2514 		if ((iface) == ifp) {					\
2515 			/* Skip bridge processing... src == dest */	\
2516 			return (m);					\
2517 		}							\
2518 		/* It's passing over or to the bridge, locally. */	\
2519 		ETHER_BPF_MTAP(bifp, m);				\
2520 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);		\
2521 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); \
2522 		/* Filter on the physical interface. */			\
2523 		if (V_pfil_local_phys && (PFIL_HOOKED_IN(V_inet_pfil_head) \
2524 		     OR_PFIL_HOOKED_INET6)) {				\
2525 			if (bridge_pfil(&m, NULL, ifp,			\
2526 			    PFIL_IN) != 0 || m == NULL) {		\
2527 				return (NULL);				\
2528 			}						\
2529 		}							\
2530 		if ((iface) != bifp)					\
2531 			ETHER_BPF_MTAP(iface, m);			\
2532 		return (m);						\
2533 	}								\
2534 									\
2535 	/* We just received a packet that we sent out. */		\
2536 	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
2537 	    OR_CARP_CHECK_WE_ARE_SRC((iface))			\
2538 	    ) {								\
2539 		m_freem(m);						\
2540 		return (NULL);						\
2541 	}
2542 
2543 	/*
2544 	 * Unicast.  Make sure it's not for the bridge.
2545 	 */
2546 	do { GRAB_OUR_PACKETS(bifp) } while (0);
2547 
2548 	/*
2549 	 * Give ifp a chance at first priority.  This helps when the packet
2550 	 * comes through an interface like a vlan(4) that shares a MAC with
2551 	 * several interfaces on the same bridge.  It also saves some CPU
2552 	 * cycles when the destination interface and the input interface
2553 	 * (i.e. ifp) are the same.
2554 	 */
2555 	do { GRAB_OUR_PACKETS(ifp) } while (0);
2556 
2557 	/* Now check all the bridge members. */
2558 	CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2559 		GRAB_OUR_PACKETS(bif2->bif_ifp)
2560 	}
2561 
2562 #undef OR_CARP_CHECK_WE_ARE_DST
2563 #undef OR_CARP_CHECK_WE_ARE_SRC
2564 #undef OR_PFIL_HOOKED_INET6
2565 #undef GRAB_OUR_PACKETS
2566 
2567 	/* Perform the bridge forwarding function. */
2568 	bridge_forward(sc, bif, m);
2569 
2570 	return (NULL);
2571 }
2572 
2573 /*
2574  * bridge_broadcast:
2575  *
2576  *	Send a frame to all interfaces that are members of
2577  *	the bridge, except for the one on which the packet
2578  *	arrived.
2579  *
2580  *	NOTE: Releases the lock on return.
2581  */
2582 static void
2583 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2584     struct mbuf *m, int runfilt)
2585 {
2586 	struct bridge_iflist *dbif, *sbif;
2587 	struct mbuf *mc;
2588 	struct ifnet *dst_if;
2589 	int used = 0, i;
2590 
2591 	NET_EPOCH_ASSERT();
2592 
2593 	sbif = bridge_lookup_member_if(sc, src_if);
2594 
2595 	/* Filter on the bridge interface before broadcasting */
2596 	if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2597 #ifdef INET6
2598 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2599 #endif
2600 	    )) {
2601 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2602 			return;
2603 		if (m == NULL)
2604 			return;
2605 	}
2606 
2607 	CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2608 		dst_if = dbif->bif_ifp;
2609 		if (dst_if == src_if)
2610 			continue;
2611 
2612 		/* Private segments cannot talk to each other */
2613 		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2614 			continue;
2615 
2616 		if ((dbif->bif_flags & IFBIF_STP) &&
2617 		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2618 			continue;
2619 
2620 		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2621 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2622 			continue;
2623 
2624 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2625 			continue;
2626 
2627 		if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
2628 			mc = m;
2629 			used = 1;
2630 		} else {
2631 			mc = m_dup(m, M_NOWAIT);
2632 			if (mc == NULL) {
2633 				if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2634 				continue;
2635 			}
2636 		}
2637 
2638 		/*
2639 		 * Filter on the output interface. Pass a NULL bridge interface
2640 		 * pointer so we do not redundantly filter on the bridge for
2641 		 * each interface we broadcast on.
2642 		 */
2643 		if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2644 #ifdef INET6
2645 		    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2646 #endif
2647 		    )) {
2648 			if (used == 0) {
2649 				/* Keep the layer3 header aligned */
2650 				i = min(mc->m_pkthdr.len, max_protohdr);
2651 				mc = m_copyup(mc, i, ETHER_ALIGN);
2652 				if (mc == NULL) {
2653 					if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2654 					continue;
2655 				}
2656 			}
2657 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2658 				continue;
2659 			if (mc == NULL)
2660 				continue;
2661 		}
2662 
2663 		bridge_enqueue(sc, dst_if, mc);
2664 	}
2665 	if (used == 0)
2666 		m_freem(m);
2667 }
2668 
2669 /*
2670  * bridge_span:
2671  *
2672  *	Duplicate a packet out one or more interfaces that are in span mode;
2673  *	the original mbuf is unmodified.
2674  */
2675 static void
2676 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2677 {
2678 	struct bridge_iflist *bif;
2679 	struct ifnet *dst_if;
2680 	struct mbuf *mc;
2681 
2682 	NET_EPOCH_ASSERT();
2683 
2684 	if (CK_LIST_EMPTY(&sc->sc_spanlist))
2685 		return;
2686 
2687 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2688 		dst_if = bif->bif_ifp;
2689 
2690 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2691 			continue;
2692 
2693 		mc = m_copypacket(m, M_NOWAIT);
2694 		if (mc == NULL) {
2695 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2696 			continue;
2697 		}
2698 
2699 		bridge_enqueue(sc, dst_if, mc);
2700 	}
2701 }
2702 
2703 /*
2704  * bridge_rtupdate:
2705  *
2706  *	Add a bridge routing entry.
2707  */
2708 static int
2709 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
2710     struct bridge_iflist *bif, int setflags, uint8_t flags)
2711 {
2712 	struct bridge_rtnode *brt;
2713 	int error;
2714 
2715 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
2716 
2717 	/* Check that the source address is valid and not multicast. */
2718 	if (ETHER_IS_MULTICAST(dst) ||
2719 	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
2720 	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
2721 		return (EINVAL);
2722 
2723 	/* 802.1p frames map to vlan 1 */
2724 	if (vlan == 0)
2725 		vlan = 1;
2726 
2727 	/*
2728 	 * A route for this destination might already exist.  If so,
2729 	 * update it, otherwise create a new one.
2730 	 */
2731 	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2732 		BRIDGE_RT_LOCK(sc);
2733 
2734 		/* Check again, now that we have the lock. There could have
2735 		 * been a race and we only want to insert this once. */
2736 		if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) != NULL) {
2737 			BRIDGE_RT_UNLOCK(sc);
2738 			return (0);
2739 		}
2740 
2741 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2742 			sc->sc_brtexceeded++;
2743 			BRIDGE_RT_UNLOCK(sc);
2744 			return (ENOSPC);
2745 		}
2746 		/* Check per interface address limits (if enabled) */
2747 		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2748 			bif->bif_addrexceeded++;
2749 			BRIDGE_RT_UNLOCK(sc);
2750 			return (ENOSPC);
2751 		}
2752 
2753 		/*
2754 		 * Allocate a new bridge forwarding node, and
2755 		 * initialize the expiration time and Ethernet
2756 		 * address.
2757 		 */
2758 		brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2759 		if (brt == NULL) {
2760 			BRIDGE_RT_UNLOCK(sc);
2761 			return (ENOMEM);
2762 		}
2763 		brt->brt_vnet = curvnet;
2764 
2765 		if (bif->bif_flags & IFBIF_STICKY)
2766 			brt->brt_flags = IFBAF_STICKY;
2767 		else
2768 			brt->brt_flags = IFBAF_DYNAMIC;
2769 
2770 		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2771 		brt->brt_vlan = vlan;
2772 
2773 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2774 			uma_zfree(V_bridge_rtnode_zone, brt);
2775 			BRIDGE_RT_UNLOCK(sc);
2776 			return (error);
2777 		}
2778 		brt->brt_dst = bif;
2779 		bif->bif_addrcnt++;
2780 
2781 		BRIDGE_RT_UNLOCK(sc);
2782 	}
2783 
2784 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2785 	    brt->brt_dst != bif) {
2786 		BRIDGE_RT_LOCK(sc);
2787 		brt->brt_dst->bif_addrcnt--;
2788 		brt->brt_dst = bif;
2789 		brt->brt_dst->bif_addrcnt++;
2790 		BRIDGE_RT_UNLOCK(sc);
2791 	}
2792 
2793 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2794 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
2795 	if (setflags)
2796 		brt->brt_flags = flags;
2797 
2798 	return (0);
2799 }
2800 
2801 /*
2802  * bridge_rtlookup:
2803  *
2804  *	Lookup the destination interface for an address.
2805  */
2806 static struct ifnet *
2807 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2808 {
2809 	struct bridge_rtnode *brt;
2810 
2811 	NET_EPOCH_ASSERT();
2812 
2813 	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
2814 		return (NULL);
2815 
2816 	return (brt->brt_ifp);
2817 }
2818 
2819 /*
2820  * bridge_rttrim:
2821  *
2822  *	Trim the routing table so that the number of
2823  *	routing entries is less than or equal to the
2824  *	maximum number.
2825  */
2826 static void
2827 bridge_rttrim(struct bridge_softc *sc)
2828 {
2829 	struct bridge_rtnode *brt, *nbrt;
2830 
2831 	NET_EPOCH_ASSERT();
2832 	BRIDGE_RT_LOCK_ASSERT(sc);
2833 
2834 	/* Make sure we actually need to do this. */
2835 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2836 		return;
2837 
2838 	/* Force an aging cycle; this might trim enough addresses. */
2839 	bridge_rtage(sc);
2840 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2841 		return;
2842 
2843 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2844 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2845 			bridge_rtnode_destroy(sc, brt);
2846 			if (sc->sc_brtcnt <= sc->sc_brtmax)
2847 				return;
2848 		}
2849 	}
2850 }
2851 
2852 /*
2853  * bridge_timer:
2854  *
2855  *	Aging timer for the bridge.
2856  */
2857 static void
2858 bridge_timer(void *arg)
2859 {
2860 	struct bridge_softc *sc = arg;
2861 
2862 	BRIDGE_RT_LOCK_ASSERT(sc);
2863 
2864 	/* Destruction of rtnodes requires a proper vnet context */
2865 	CURVNET_SET(sc->sc_ifp->if_vnet);
2866 	bridge_rtage(sc);
2867 
2868 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
2869 		callout_reset(&sc->sc_brcallout,
2870 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2871 	CURVNET_RESTORE();
2872 }
2873 
2874 /*
2875  * bridge_rtage:
2876  *
2877  *	Perform an aging cycle.
2878  */
2879 static void
2880 bridge_rtage(struct bridge_softc *sc)
2881 {
2882 	struct bridge_rtnode *brt, *nbrt;
2883 
2884 	BRIDGE_RT_LOCK_ASSERT(sc);
2885 
2886 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2887 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2888 			if (time_uptime >= brt->brt_expire)
2889 				bridge_rtnode_destroy(sc, brt);
2890 		}
2891 	}
2892 }
2893 
2894 /*
2895  * bridge_rtflush:
2896  *
2897  *	Remove all dynamic addresses from the bridge.
2898  */
2899 static void
2900 bridge_rtflush(struct bridge_softc *sc, int full)
2901 {
2902 	struct bridge_rtnode *brt, *nbrt;
2903 
2904 	BRIDGE_RT_LOCK_ASSERT(sc);
2905 
2906 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2907 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2908 			bridge_rtnode_destroy(sc, brt);
2909 	}
2910 }
2911 
2912 /*
2913  * bridge_rtdaddr:
2914  *
2915  *	Remove an address from the table.
2916  */
2917 static int
2918 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2919 {
2920 	struct bridge_rtnode *brt;
2921 	int found = 0;
2922 
2923 	BRIDGE_RT_LOCK(sc);
2924 
2925 	/*
2926 	 * If vlan is zero then we want to delete for all vlans, so the lookup
2927 	 * may return more than one.
2928 	 */
2929 	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
2930 		bridge_rtnode_destroy(sc, brt);
2931 		found = 1;
2932 	}
2933 
2934 	BRIDGE_RT_UNLOCK(sc);
2935 
2936 	return (found ? 0 : ENOENT);
2937 }
2938 
2939 /*
2940  * bridge_rtdelete:
2941  *
2942  *	Delete routes to a specific member interface.
2943  */
2944 static void
2945 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2946 {
2947 	struct bridge_rtnode *brt, *nbrt;
2948 
2949 	BRIDGE_RT_LOCK_ASSERT(sc);
2950 
2951 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2952 		if (brt->brt_ifp == ifp && (full ||
2953 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2954 			bridge_rtnode_destroy(sc, brt);
2955 	}
2956 }
2957 
2958 /*
2959  * bridge_rtable_init:
2960  *
2961  *	Initialize the route table for this bridge.
2962  */
2963 static void
2964 bridge_rtable_init(struct bridge_softc *sc)
2965 {
2966 	int i;
2967 
2968 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2969 	    M_DEVBUF, M_WAITOK);
2970 
2971 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2972 		CK_LIST_INIT(&sc->sc_rthash[i]);
2973 
2974 	sc->sc_rthash_key = arc4random();
2975 	CK_LIST_INIT(&sc->sc_rtlist);
2976 }
2977 
2978 /*
2979  * bridge_rtable_fini:
2980  *
2981  *	Deconstruct the route table for this bridge.
2982  */
2983 static void
2984 bridge_rtable_fini(struct bridge_softc *sc)
2985 {
2986 
2987 	KASSERT(sc->sc_brtcnt == 0,
2988 	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
2989 	free(sc->sc_rthash, M_DEVBUF);
2990 }
2991 
2992 /*
2993  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2994  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2995  */
2996 #define	mix(a, b, c)							\
2997 do {									\
2998 	a -= b; a -= c; a ^= (c >> 13);					\
2999 	b -= c; b -= a; b ^= (a << 8);					\
3000 	c -= a; c -= b; c ^= (b >> 13);					\
3001 	a -= b; a -= c; a ^= (c >> 12);					\
3002 	b -= c; b -= a; b ^= (a << 16);					\
3003 	c -= a; c -= b; c ^= (b >> 5);					\
3004 	a -= b; a -= c; a ^= (c >> 3);					\
3005 	b -= c; b -= a; b ^= (a << 10);					\
3006 	c -= a; c -= b; c ^= (b >> 15);					\
3007 } while (/*CONSTCOND*/0)
3008 
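/*
 * bridge_rthash:
 *
 *	Hash an Ethernet address into a route table bucket, mixing in the
 *	per-bridge random sc_rthash_key.
 */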
3009 static __inline uint32_t
3010 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3011 {
3012 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3013 
3014 	b += addr[5] << 8;
3015 	b += addr[4];
3016 	a += addr[3] << 24;
3017 	a += addr[2] << 16;
3018 	a += addr[1] << 8;
3019 	a += addr[0];
3020 
3021 	mix(a, b, c);
3022 
3023 	return (c & BRIDGE_RTHASH_MASK);
3024 }
3025 
3026 #undef mix
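
/*
 * Stand-alone equivalent of bridge_rthash() for experimentation outside
 * the kernel.  Illustrative only; TABLE_SIZE is a placeholder for
 * BRIDGE_RTHASH_SIZE, which must be a power of two for the mask to work.
 */
#if 0
#include <stdint.h>

#define	TABLE_SIZE	1024U		/* placeholder, must be a power of two */

static uint32_t
mac_hash(const uint8_t addr[6], uint32_t key)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;

	b += (uint32_t)addr[5] << 8;
	b += addr[4];
	a += (uint32_t)addr[3] << 24;
	a += (uint32_t)addr[2] << 16;
	a += (uint32_t)addr[1] << 8;
	a += addr[0];

	/* Bob Jenkins' mix(), written out. */
	a -= b; a -= c; a ^= (c >> 13);
	b -= c; b -= a; b ^= (a << 8);
	c -= a; c -= b; c ^= (b >> 13);
	a -= b; a -= c; a ^= (c >> 12);
	b -= c; b -= a; b ^= (a << 16);
	c -= a; c -= b; c ^= (b >> 5);
	a -= b; a -= c; a ^= (c >> 3);
	b -= c; b -= a; b ^= (a << 10);
	c -= a; c -= b; c ^= (b >> 15);

	return (c & (TABLE_SIZE - 1));
}
#endif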
3027 
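/*
 * bridge_rtnode_addr_cmp:
 *
 *	memcmp()-style comparison of two Ethernet addresses.  The hash
 *	chains are kept sorted with this ordering (see
 *	bridge_rtnode_insert()), which is what lets bridge_rtnode_lookup()
 *	give up early once the probed address sorts past the current node.
 */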
3028 static int
3029 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3030 {
3031 	int i, d;
3032 
3033 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3034 		d = ((int)a[i]) - ((int)b[i]);
3035 	}
3036 
3037 	return (d);
3038 }
3039 
3040 /*
3041  * bridge_rtnode_lookup:
3042  *
3043  *	Look up a bridge route node for the specified destination.  Compare
3044  *	the vlan id, or if it is zero, just return the first match.
3045  */
3046 static struct bridge_rtnode *
3047 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
3048 {
3049 	struct bridge_rtnode *brt;
3050 	uint32_t hash;
3051 	int dir;
3052 
3053 	BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);
3054 
3055 	hash = bridge_rthash(sc, addr);
3056 	CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
3057 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3058 		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
3059 			return (brt);
3060 		if (dir > 0)
3061 			return (NULL);
3062 	}
3063 
3064 	return (NULL);
3065 }
3066 
3067 /*
3068  * bridge_rtnode_insert:
3069  *
3070  *	Insert the specified bridge node into the route table.  We
3071  *	assume the entry is not already in the table.
3072  */
3073 static int
3074 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3075 {
3076 	struct bridge_rtnode *lbrt;
3077 	uint32_t hash;
3078 	int dir;
3079 
3080 	BRIDGE_RT_LOCK_ASSERT(sc);
3081 
3082 	hash = bridge_rthash(sc, brt->brt_addr);
3083 
3084 	lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
3085 	if (lbrt == NULL) {
3086 		CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
3087 		goto out;
3088 	}
3089 
3090 	do {
3091 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3092 		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
3093 			return (EEXIST);
3094 		if (dir > 0) {
3095 			CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3096 			goto out;
3097 		}
3098 		if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
3099 			CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3100 			goto out;
3101 		}
3102 		lbrt = CK_LIST_NEXT(lbrt, brt_hash);
3103 	} while (lbrt != NULL);
3104 
3105 #ifdef DIAGNOSTIC
3106 	panic("bridge_rtnode_insert: impossible");
3107 #endif
3108 
3109 out:
3110 	CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3111 	sc->sc_brtcnt++;
3112 
3113 	return (0);
3114 }
3115 
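/*
 * bridge_rtnode_destroy_cb:
 *
 *	Deferred free of a route node, run from an epoch callback once no
 *	network epoch readers can still reference it.  The vnet context is
 *	set because the rtnode UMA zone is per-vnet.
 */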
3116 static void
3117 bridge_rtnode_destroy_cb(struct epoch_context *ctx)
3118 {
3119 	struct bridge_rtnode *brt;
3120 
3121 	brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
3122 
3123 	CURVNET_SET(brt->brt_vnet);
3124 	uma_zfree(V_bridge_rtnode_zone, brt);
3125 	CURVNET_RESTORE();
3126 }
3127 
3128 /*
3129  * bridge_rtnode_destroy:
3130  *
3131  *	Destroy a bridge rtnode.
3132  */
3133 static void
3134 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3135 {
3136 	BRIDGE_RT_LOCK_ASSERT(sc);
3137 
3138 	CK_LIST_REMOVE(brt, brt_hash);
3139 
3140 	CK_LIST_REMOVE(brt, brt_list);
3141 	sc->sc_brtcnt--;
3142 	brt->brt_dst->bif_addrcnt--;
3143 
3144 	NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
3145 }
3146 
3147 /*
3148  * bridge_rtable_expire:
3149  *
3150  *	Set the expiry time for all routes on an interface.
3151  */
3152 static void
3153 bridge_rtable_expire(struct ifnet *ifp, int age)
3154 {
3155 	struct bridge_softc *sc = ifp->if_bridge;
3156 	struct bridge_rtnode *brt;
3157 
3158 	CURVNET_SET(ifp->if_vnet);
3159 	BRIDGE_RT_LOCK(sc);
3160 
3161 	/*
3162 	 * If the age is zero then flush, otherwise set all the expiry times to
3163 	 * 'age' for the interface.
3164 	 */
3165 	if (age == 0)
3166 		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3167 	else {
3168 		CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3169 			/* Cap the expiry time to 'age' */
3170 			if (brt->brt_ifp == ifp &&
3171 			    brt->brt_expire > time_uptime + age &&
3172 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3173 				brt->brt_expire = time_uptime + age;
3174 		}
3175 	}
3176 	BRIDGE_RT_UNLOCK(sc);
3177 	CURVNET_RESTORE();
3178 }
3179 
3180 /*
3181  * bridge_state_change:
3182  *
3183  *	Callback from the bridgestp code when a port changes states.
3184  */
3185 static void
3186 bridge_state_change(struct ifnet *ifp, int state)
3187 {
3188 	struct bridge_softc *sc = ifp->if_bridge;
3189 	static const char *stpstates[] = {
3190 		"disabled",
3191 		"listening",
3192 		"learning",
3193 		"forwarding",
3194 		"blocking",
3195 		"discarding"
3196 	};
3197 
3198 	CURVNET_SET(ifp->if_vnet);
3199 	if (V_log_stp)
3200 		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
3201 		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
3202 	CURVNET_RESTORE();
3203 }
3204 
3205 /*
3206  * Send bridge packets through pfil if they are one of the types pfil can deal
3207  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3208  * question.)  If bifp or ifp is NULL then packet filtering is skipped for
3209  * that interface.
3210  */
3211 static int
3212 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3213 {
3214 	int snap, error, i, hlen;
3215 	struct ether_header *eh1, eh2;
3216 	struct ip *ip;
3217 	struct llc llc1;
3218 	u_int16_t ether_type;
3219 	pfil_return_t rv;
3220 
3221 	snap = 0;
3222 	error = -1;	/* Default to an error unless cleared below */
3223 
3224 #if 0
3225 	/* we may return with the IP fields swapped, ensure it's not shared */
3226 	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
3227 #endif
3228 
3229 	if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
3230 		return (0); /* filtering is disabled */
3231 
3232 	i = min((*mp)->m_pkthdr.len, max_protohdr);
3233 	if ((*mp)->m_len < i) {
3234 	    *mp = m_pullup(*mp, i);
3235 	    if (*mp == NULL) {
3236 		printf("%s: m_pullup failed\n", __func__);
3237 		return (-1);
3238 	    }
3239 	}
3240 
3241 	eh1 = mtod(*mp, struct ether_header *);
3242 	ether_type = ntohs(eh1->ether_type);
3243 
3244 	/*
3245 	 * Check for SNAP/LLC.
3246 	 */
3247 	if (ether_type < ETHERMTU) {
3248 		struct llc *llc2 = (struct llc *)(eh1 + 1);
3249 
3250 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3251 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
3252 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
3253 		    llc2->llc_control == LLC_UI) {
3254 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
3255 			snap = 1;
3256 		}
3257 	}
3258 
3259 	/*
3260 	 * If we're trying to filter bridge traffic, don't look at anything
3261 	 * other than IP and ARP traffic.  If the filter doesn't understand
3262 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
3263 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3264 	 * but of course we don't have an AppleTalk filter to begin with.
3265 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3266 	 * ARP traffic.)
3267 	 */
3268 	switch (ether_type) {
3269 		case ETHERTYPE_ARP:
3270 		case ETHERTYPE_REVARP:
3271 			if (V_pfil_ipfw_arp == 0)
3272 				return (0); /* Automatically pass */
3273 			break;
3274 
3275 		case ETHERTYPE_IP:
3276 #ifdef INET6
3277 		case ETHERTYPE_IPV6:
3278 #endif /* INET6 */
3279 			break;
3280 		default:
3281 			/*
3282 			 * Check to see if the user wants to pass non-ip
3283 			 * packets, these will not be checked by pfil(9) and
3284 			 * packets; these will not be checked by pfil(9) and
3285 			 * would be passed unconditionally, so the default is to drop.
3286 			if (V_pfil_onlyip)
3287 				goto bad;
3288 	}
3289 
3290 	/* Run the packet through pfil before stripping link headers */
3291 	if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
3292 	    dir == PFIL_OUT && ifp != NULL) {
3293 		switch (pfil_run_hooks(V_link_pfil_head, mp, ifp, dir, NULL)) {
3294 		case PFIL_DROPPED:
3295 			return (EACCES);
3296 		case PFIL_CONSUMED:
3297 			return (0);
3298 		}
3299 	}
3300 
3301 	/* Strip off the Ethernet header and keep a copy. */
3302 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3303 	m_adj(*mp, ETHER_HDR_LEN);
3304 
3305 	/* Strip off snap header, if present */
3306 	if (snap) {
3307 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3308 		m_adj(*mp, sizeof(struct llc));
3309 	}
3310 
3311 	/*
3312 	 * Check the IP header for alignment and errors
3313 	 */
3314 	if (dir == PFIL_IN) {
3315 		switch (ether_type) {
3316 			case ETHERTYPE_IP:
3317 				error = bridge_ip_checkbasic(mp);
3318 				break;
3319 #ifdef INET6
3320 			case ETHERTYPE_IPV6:
3321 				error = bridge_ip6_checkbasic(mp);
3322 				break;
3323 #endif /* INET6 */
3324 			default:
3325 				error = 0;
3326 		}
3327 		if (error)
3328 			goto bad;
3329 	}
3330 
3331 	error = 0;
3332 
3333 	/*
3334 	 * Run the packet through pfil
3335 	 */
3336 	rv = PFIL_PASS;
3337 	switch (ether_type) {
3338 	case ETHERTYPE_IP:
3339 		/*
3340 		 * Run pfil on the member interface and the bridge; both can
3341 		 * be skipped by clearing pfil_member or pfil_bridge.
3342 		 *
3343 		 * Keep the order:
3344 		 *   in_if -> bridge_if -> out_if
3345 		 */
3346 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3347 		    pfil_run_hooks(V_inet_pfil_head, mp, bifp, dir, NULL)) !=
3348 		    PFIL_PASS)
3349 			break;
3350 
3351 		if (V_pfil_member && ifp != NULL && (rv =
3352 		    pfil_run_hooks(V_inet_pfil_head, mp, ifp, dir, NULL)) !=
3353 		    PFIL_PASS)
3354 			break;
3355 
3356 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3357 		    pfil_run_hooks(V_inet_pfil_head, mp, bifp, dir, NULL)) !=
3358 		    PFIL_PASS)
3359 			break;
3360 
3361 		/* Check if we need to fragment the packet. */
3362 		/* bridge_fragment generates an mbuf chain of packets */
3363 		/* that already include Ethernet headers. */
3364 		if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
3365 			i = (*mp)->m_pkthdr.len;
3366 			if (i > ifp->if_mtu) {
3367 				error = bridge_fragment(ifp, mp, &eh2, snap,
3368 					    &llc1);
3369 				return (error);
3370 			}
3371 		}
3372 
3373 		/* Recalculate the ip checksum. */
3374 		ip = mtod(*mp, struct ip *);
3375 		hlen = ip->ip_hl << 2;
3376 		if (hlen < sizeof(struct ip))
3377 			goto bad;
3378 		if (hlen > (*mp)->m_len) {
3379 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3380 				goto bad;
3381 			ip = mtod(*mp, struct ip *);
3382 			if (ip == NULL)
3383 				goto bad;
3384 		}
3385 		ip->ip_sum = 0;
3386 		if (hlen == sizeof(struct ip))
3387 			ip->ip_sum = in_cksum_hdr(ip);
3388 		else
3389 			ip->ip_sum = in_cksum(*mp, hlen);
3390 
3391 		break;
3392 #ifdef INET6
3393 	case ETHERTYPE_IPV6:
3394 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3395 		    pfil_run_hooks(V_inet6_pfil_head, mp, bifp, dir, NULL)) !=
3396 		    PFIL_PASS)
3397 			break;
3398 
3399 		if (V_pfil_member && ifp != NULL && (rv =
3400 		    pfil_run_hooks(V_inet6_pfil_head, mp, ifp, dir, NULL)) !=
3401 		    PFIL_PASS)
3402 			break;
3403 
3404 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3405 		    pfil_run_hooks(V_inet6_pfil_head, mp, bifp, dir, NULL)) !=
3406 		    PFIL_PASS)
3407 			break;
3408 		break;
3409 #endif
3410 	}
3411 
3412 	switch (rv) {
3413 	case PFIL_CONSUMED:
3414 		return (0);
3415 	case PFIL_DROPPED:
3416 		return (EACCES);
3417 	default:
3418 		break;
3419 	}
3420 
3421 	error = -1;
3422 
3423 	/*
3424 	 * Finally, put everything back the way it was and return
3425 	 */
3426 	if (snap) {
3427 		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
3428 		if (*mp == NULL)
3429 			return (error);
3430 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3431 	}
3432 
3433 	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
3434 	if (*mp == NULL)
3435 		return (error);
3436 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3437 
3438 	return (0);
3439 
3440 bad:
3441 	m_freem(*mp);
3442 	*mp = NULL;
3443 	return (error);
3444 }
3445 
3446 /*
3447  * Perform basic checks on header size since
3448  * pfil assumes ip_input has already
3449  * processed it.  Cut-and-pasted from ip_input.c.
3450  * Given how simple the IPv6 version is,
3451  * does the IPv4 version really need to be
3452  * this complicated?
3453  *
3454  * XXX Should we update ipstat here, or not?
3455  * XXX Right now we update ipstat but not
3456  * XXX csum_counter.
3457  */
3458 static int
3459 bridge_ip_checkbasic(struct mbuf **mp)
3460 {
3461 	struct mbuf *m = *mp;
3462 	struct ip *ip;
3463 	int len, hlen;
3464 	u_short sum;
3465 
3466 	if (*mp == NULL)
3467 		return (-1);
3468 
3469 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3470 		if ((m = m_copyup(m, sizeof(struct ip),
3471 			(max_linkhdr + 3) & ~3)) == NULL) {
3472 			/* XXXJRT new stat, please */
3473 			KMOD_IPSTAT_INC(ips_toosmall);
3474 			goto bad;
3475 		}
3476 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3477 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3478 			KMOD_IPSTAT_INC(ips_toosmall);
3479 			goto bad;
3480 		}
3481 	}
3482 	ip = mtod(m, struct ip *);
3483 	if (ip == NULL) goto bad;
3484 
3485 	if (ip->ip_v != IPVERSION) {
3486 		KMOD_IPSTAT_INC(ips_badvers);
3487 		goto bad;
3488 	}
3489 	hlen = ip->ip_hl << 2;
3490 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3491 		KMOD_IPSTAT_INC(ips_badhlen);
3492 		goto bad;
3493 	}
3494 	if (hlen > m->m_len) {
3495 		if ((m = m_pullup(m, hlen)) == NULL) {
3496 			KMOD_IPSTAT_INC(ips_badhlen);
3497 			goto bad;
3498 		}
3499 		ip = mtod(m, struct ip *);
3500 		if (ip == NULL) goto bad;
3501 	}
3502 
3503 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3504 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3505 	} else {
3506 		if (hlen == sizeof(struct ip)) {
3507 			sum = in_cksum_hdr(ip);
3508 		} else {
3509 			sum = in_cksum(m, hlen);
3510 		}
3511 	}
3512 	if (sum) {
3513 		KMOD_IPSTAT_INC(ips_badsum);
3514 		goto bad;
3515 	}
3516 
3517 	/* Retrieve the packet length. */
3518 	len = ntohs(ip->ip_len);
3519 
3520 	/*
3521 	 * Check for additional length bogosity
3522 	 */
3523 	if (len < hlen) {
3524 		KMOD_IPSTAT_INC(ips_badlen);
3525 		goto bad;
3526 	}
3527 
3528 	/*
3529 	 * Check that the amount of data in the buffers
3530 	 * is at least as much as the IP header would have us expect.
3531 	 * Drop packet if shorter than we expect.
3532 	 */
3533 	if (m->m_pkthdr.len < len) {
3534 		KMOD_IPSTAT_INC(ips_tooshort);
3535 		goto bad;
3536 	}
3537 
3538 	/* Checks out, proceed */
3539 	*mp = m;
3540 	return (0);
3541 
3542 bad:
3543 	*mp = m;
3544 	return (-1);
3545 }
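
/*
 * bridge_ip_checkbasic() relies on in_cksum_hdr()/in_cksum() for the
 * header checksum.  The underlying RFC 1071 ones'-complement sum can be
 * written stand-alone as below; a correct header folds to zero.
 * Illustrative only; not used by this file.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint16_t
ipv4_header_cksum(const void *hdr, size_t hlen)
{
	const uint16_t *p = hdr;
	uint32_t sum = 0;

	/* hlen is a multiple of 4 for a valid IPv4 header. */
	for (; hlen > 1; hlen -= 2)
		sum += *p++;

	/* Fold the carries back in and take the ones' complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)~sum);
}
#endif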
3546 
3547 #ifdef INET6
3548 /*
3549  * Same as above, but for IPv6.
3550  * Cut-and-pasted from ip6_input.c.
3551  * XXX Should we update ip6stat, or not?
3552  */
3553 static int
3554 bridge_ip6_checkbasic(struct mbuf **mp)
3555 {
3556 	struct mbuf *m = *mp;
3557 	struct ip6_hdr *ip6;
3558 
3559 	/*
3560 	 * If the IPv6 header is not aligned, slurp it up into a new
3561 	 * mbuf with space for link headers, in the event we forward
3562 	 * it.  Otherwise, if it is aligned, make sure the entire base
3563 	 * IPv6 header is in the first mbuf of the chain.
3564 	 */
3565 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3566 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3567 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3568 			    (max_linkhdr + 3) & ~3)) == NULL) {
3569 			/* XXXJRT new stat, please */
3570 			IP6STAT_INC(ip6s_toosmall);
3571 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3572 			goto bad;
3573 		}
3574 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3575 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3576 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3577 			IP6STAT_INC(ip6s_toosmall);
3578 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3579 			goto bad;
3580 		}
3581 	}
3582 
3583 	ip6 = mtod(m, struct ip6_hdr *);
3584 
3585 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3586 		IP6STAT_INC(ip6s_badvers);
3587 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3588 		goto bad;
3589 	}
3590 
3591 	/* Checks out, proceed */
3592 	*mp = m;
3593 	return (0);
3594 
3595 bad:
3596 	*mp = m;
3597 	return (-1);
3598 }
3599 #endif /* INET6 */
3600 
3601 /*
3602  * bridge_fragment:
3603  *
3604  *	Fragment mbuf chain in multiple packets and prepend ethernet header.
3605  */
3606 static int
3607 bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
3608     int snap, struct llc *llc)
3609 {
3610 	struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
3611 	struct ip *ip;
3612 	int error = -1;
3613 
3614 	if (m->m_len < sizeof(struct ip) &&
3615 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3616 		goto dropit;
3617 	ip = mtod(m, struct ip *);
3618 
3619 	m->m_pkthdr.csum_flags |= CSUM_IP;
3620 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
3621 	if (error)
3622 		goto dropit;
3623 
3624 	/*
3625 	 * Walk the chain and re-add the Ethernet header for
3626 	 * each mbuf packet.
3627 	 */
3628 	for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
3629 		nextpkt = mcur->m_nextpkt;
3630 		mcur->m_nextpkt = NULL;
3631 		if (snap) {
3632 			M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
3633 			if (mcur == NULL) {
3634 				error = ENOBUFS;
3635 				if (mprev != NULL)
3636 					mprev->m_nextpkt = nextpkt;
3637 				goto dropit;
3638 			}
3639 			bcopy(llc, mtod(mcur, caddr_t),sizeof(struct llc));
3640 		}
3641 
3642 		M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
3643 		if (mcur == NULL) {
3644 			error = ENOBUFS;
3645 			if (mprev != NULL)
3646 				mprev->m_nextpkt = nextpkt;
3647 			goto dropit;
3648 		}
3649 		bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);
3650 
3651 		/*
3652 		 * The previous two M_PREPEND could have inserted one or two
3653 		 * mbufs in front so we have to update the previous packet's
3654 		 * m_nextpkt.
3655 		 */
3656 		mcur->m_nextpkt = nextpkt;
3657 		if (mprev != NULL)
3658 			mprev->m_nextpkt = mcur;
3659 		else {
3660 			/* The first mbuf in the original chain needs to be
3661 			 * updated. */
3662 			*mp = mcur;
3663 		}
3664 		mprev = mcur;
3665 	}
3666 
3667 	KMOD_IPSTAT_INC(ips_fragmented);
3668 	return (error);
3669 
3670 dropit:
3671 	for (mcur = *mp; mcur; mcur = m) { /* dropping the full packet chain */
3672 		m = mcur->m_nextpkt;
3673 		m_freem(mcur);
3674 	}
3675 	return (error);
3676 }
3677 
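/*
 * bridge_linkstate:
 *
 *	Called when a member interface changes link state; recompute the
 *	bridge's aggregate link state and let bridgestp re-evaluate the port.
 */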
3678 static void
3679 bridge_linkstate(struct ifnet *ifp)
3680 {
3681 	struct bridge_softc *sc = ifp->if_bridge;
3682 	struct bridge_iflist *bif;
3683 	struct epoch_tracker et;
3684 
3685 	NET_EPOCH_ENTER(et);
3686 
3687 	bif = bridge_lookup_member_if(sc, ifp);
3688 	if (bif == NULL) {
3689 		NET_EPOCH_EXIT(et);
3690 		return;
3691 	}
3692 	bridge_linkcheck(sc);
3693 
3694 	bstp_linkstate(&bif->bif_stp);
3695 
3696 	NET_EPOCH_EXIT(et);
3697 }
3698 
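/*
 * bridge_linkcheck:
 *
 *	Recompute the bridge interface's link state: up if at least one
 *	member is up, or if no member reports link state at all.
 */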
3699 static void
3700 bridge_linkcheck(struct bridge_softc *sc)
3701 {
3702 	struct bridge_iflist *bif;
3703 	int new_link, hasls;
3704 
3705 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
3706 
3707 	new_link = LINK_STATE_DOWN;
3708 	hasls = 0;
3709 	/* Our link is considered up if at least one of our ports is active */
3710 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
3711 		if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
3712 			hasls++;
3713 		if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
3714 			new_link = LINK_STATE_UP;
3715 			break;
3716 		}
3717 	}
3718 	if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
3719 		/* If no interfaces support link-state then we default to up */
3720 		new_link = LINK_STATE_UP;
3721 	}
3722 	if_link_state_change(sc->sc_ifp, new_link);
3723 }
3724