xref: /freebsd/sys/net/if_bridge.c (revision 4e99f45480598189d49d45a825533a6c9e12f02c)
1 /*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * Copyright 2001 Wasabi Systems, Inc.
7  * All rights reserved.
8  *
9  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed for the NetBSD Project by
22  *	Wasabi Systems, Inc.
23  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
24  *    or promote products derived from this software without specific prior
25  *    written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
42  * All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
55  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
56  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
57  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
61  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
62  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63  * POSSIBILITY OF SUCH DAMAGE.
64  *
65  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
66  */
67 
68 /*
69  * Network interface bridge support.
70  *
71  * TODO:
72  *
73  *	- Currently only supports Ethernet-like interfaces (Ethernet,
74  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
75  *	  to bridge other types of interfaces (maybe consider
76  *	  heterogeneous bridges).
77  */
78 
79 #include <sys/cdefs.h>
80 __FBSDID("$FreeBSD$");
81 
82 #include "opt_inet.h"
83 #include "opt_inet6.h"
84 
85 #include <sys/param.h>
86 #include <sys/eventhandler.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/protosw.h>
90 #include <sys/systm.h>
91 #include <sys/jail.h>
92 #include <sys/time.h>
93 #include <sys/socket.h> /* for net/if.h */
94 #include <sys/sockio.h>
95 #include <sys/ctype.h>  /* string functions */
96 #include <sys/kernel.h>
97 #include <sys/random.h>
98 #include <sys/syslog.h>
99 #include <sys/sysctl.h>
100 #include <vm/uma.h>
101 #include <sys/module.h>
102 #include <sys/priv.h>
103 #include <sys/proc.h>
104 #include <sys/lock.h>
105 #include <sys/mutex.h>
106 
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_clone.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_var.h>
113 #include <net/pfil.h>
114 #include <net/vnet.h>
115 
116 #include <netinet/in.h>
117 #include <netinet/in_systm.h>
118 #include <netinet/in_var.h>
119 #include <netinet/ip.h>
120 #include <netinet/ip_var.h>
121 #ifdef INET6
122 #include <netinet/ip6.h>
123 #include <netinet6/ip6_var.h>
124 #include <netinet6/in6_ifattach.h>
125 #endif
126 #if defined(INET) || defined(INET6)
127 #include <netinet/ip_carp.h>
128 #endif
129 #include <machine/in_cksum.h>
130 #include <netinet/if_ether.h>
131 #include <net/bridgestp.h>
132 #include <net/if_bridgevar.h>
133 #include <net/if_llc.h>
134 #include <net/if_vlan_var.h>
135 
136 #include <net/route.h>
137 
138 #ifdef INET6
139 /*
140  * XXX: declare here to avoid to include many inet6 related files..
141  * should be more generalized?
142  */
143 extern void	nd6_setmtu(struct ifnet *);
144 #endif
145 
146 /*
147  * Size of the route hash table.  Must be a power of two.
148  */
149 #ifndef BRIDGE_RTHASH_SIZE
150 #define	BRIDGE_RTHASH_SIZE		1024
151 #endif
152 
153 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
154 
155 /*
156  * Default maximum number of addresses to cache.
157  */
158 #ifndef BRIDGE_RTABLE_MAX
159 #define	BRIDGE_RTABLE_MAX		2000
160 #endif
161 
162 /*
163  * Timeout (in seconds) for entries learned dynamically.
164  */
165 #ifndef BRIDGE_RTABLE_TIMEOUT
166 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
167 #endif
168 
169 /*
170  * Number of seconds between walks of the route list.
171  */
172 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
173 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
174 #endif
175 
176 /*
177  * List of capabilities to possibly mask on the member interface.
178  */
179 #define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
180 					 IFCAP_TXCSUM_IPV6)
181 
182 /*
183  * List of capabilities to strip
184  */
185 #define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO
186 
187 /*
188  * Bridge locking
189  */
190 #define BRIDGE_LOCK_INIT(_sc)		do {			\
191 	mtx_init(&(_sc)->sc_mtx, "if_bridge", NULL, MTX_DEF);	\
192 } while (0)
193 #define BRIDGE_LOCK_DESTROY(_sc)	do {	\
194 	mtx_destroy(&(_sc)->sc_mtx);		\
195 } while (0)
196 #define BRIDGE_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
197 #define BRIDGE_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
198 #define BRIDGE_LOCK_ASSERT(_sc)		mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
199 #define BRIDGE_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
200 
201 /*
202  * Bridge interface list entry.
203  */
204 struct bridge_iflist {
205 	CK_LIST_ENTRY(bridge_iflist) bif_next;
206 	struct ifnet		*bif_ifp;	/* member if */
207 	struct bstp_port	bif_stp;	/* STP state */
208 	uint32_t		bif_flags;	/* member if flags */
209 	int			bif_savedcaps;	/* saved capabilities */
210 	uint32_t		bif_addrmax;	/* max # of addresses */
211 	uint32_t		bif_addrcnt;	/* cur. # of addresses */
212 	uint32_t		bif_addrexceeded;/* # of address violations */
213 	struct epoch_context	bif_epoch_ctx;
214 };
215 
216 /*
217  * Bridge route node.
218  */
219 struct bridge_rtnode {
220 	CK_LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
221 	CK_LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
222 	struct bridge_iflist	*brt_dst;	/* destination if */
223 	unsigned long		brt_expire;	/* expiration time */
224 	uint8_t			brt_flags;	/* address flags */
225 	uint8_t			brt_addr[ETHER_ADDR_LEN];
226 	uint16_t		brt_vlan;	/* vlan id */
227 	struct	vnet		*brt_vnet;
228 	struct	epoch_context	brt_epoch_ctx;
229 };
230 #define	brt_ifp			brt_dst->bif_ifp
231 
232 /*
233  * Software state for each bridge.
234  */
235 struct bridge_softc {
236 	struct ifnet		*sc_ifp;	/* make this an interface */
237 	LIST_ENTRY(bridge_softc) sc_list;
238 	struct mtx		sc_mtx;
239 	uint32_t		sc_brtmax;	/* max # of addresses */
240 	uint32_t		sc_brtcnt;	/* cur. # of addresses */
241 	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
242 	struct callout		sc_brcallout;	/* bridge callout */
243 	CK_LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
244 	CK_LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
245 	CK_LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
246 	uint32_t		sc_rthash_key;	/* key for hash */
247 	CK_LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
248 	struct bstp_state	sc_stp;		/* STP state */
249 	uint32_t		sc_brtexceeded;	/* # of cache drops */
250 	struct ifnet		*sc_ifaddr;	/* member mac copied from */
251 	struct ether_addr	sc_defaddr;	/* Default MAC address */
252 	struct epoch_context	sc_epoch_ctx;
253 };
254 
255 VNET_DEFINE_STATIC(struct mtx, bridge_list_mtx);
256 #define	V_bridge_list_mtx	VNET(bridge_list_mtx)
257 static eventhandler_tag bridge_detach_cookie;
258 
259 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
260 
261 VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
262 #define	V_bridge_rtnode_zone	VNET(bridge_rtnode_zone)
263 
264 static int	bridge_clone_create(struct if_clone *, int, caddr_t);
265 static void	bridge_clone_destroy(struct ifnet *);
266 
267 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
268 static void	bridge_mutecaps(struct bridge_softc *);
269 static void	bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
270 		    int);
271 static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
272 static void	bridge_init(void *);
273 static void	bridge_dummynet(struct mbuf *, struct ifnet *);
274 static void	bridge_stop(struct ifnet *, int);
275 static int	bridge_transmit(struct ifnet *, struct mbuf *);
276 static void	bridge_qflush(struct ifnet *);
277 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
278 static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
279 		    struct rtentry *);
280 static int	bridge_enqueue(struct bridge_softc *, struct ifnet *,
281 		    struct mbuf *);
282 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
283 
284 static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
285 		    struct mbuf *m);
286 
287 static void	bridge_timer(void *);
288 
289 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
290 		    struct mbuf *, int);
291 static void	bridge_span(struct bridge_softc *, struct mbuf *);
292 
293 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
294 		    uint16_t, struct bridge_iflist *, int, uint8_t);
295 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
296 		    uint16_t);
297 static void	bridge_rttrim(struct bridge_softc *);
298 static void	bridge_rtage(struct bridge_softc *);
299 static void	bridge_rtflush(struct bridge_softc *, int);
300 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
301 		    uint16_t);
302 
303 static void	bridge_rtable_init(struct bridge_softc *);
304 static void	bridge_rtable_fini(struct bridge_softc *);
305 
306 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
307 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
308 		    const uint8_t *, uint16_t);
309 static int	bridge_rtnode_insert(struct bridge_softc *,
310 		    struct bridge_rtnode *);
311 static void	bridge_rtnode_destroy(struct bridge_softc *,
312 		    struct bridge_rtnode *);
313 static void	bridge_rtable_expire(struct ifnet *, int);
314 static void	bridge_state_change(struct ifnet *, int);
315 
316 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
317 		    const char *name);
318 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
319 		    struct ifnet *ifp);
320 static void	bridge_delete_member(struct bridge_softc *,
321 		    struct bridge_iflist *, int);
322 static void	bridge_delete_span(struct bridge_softc *,
323 		    struct bridge_iflist *);
324 
325 static int	bridge_ioctl_add(struct bridge_softc *, void *);
326 static int	bridge_ioctl_del(struct bridge_softc *, void *);
327 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
328 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
329 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
330 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
331 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
332 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
333 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
334 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
335 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
336 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
337 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
338 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
339 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
340 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
341 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
342 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
343 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
344 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
345 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
346 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
347 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
348 static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
349 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
350 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
351 static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
352 static int	bridge_ioctl_grte(struct bridge_softc *, void *);
353 static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
354 static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
355 static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
356 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
357 		    int);
358 static int	bridge_ip_checkbasic(struct mbuf **mp);
359 #ifdef INET6
360 static int	bridge_ip6_checkbasic(struct mbuf **mp);
361 #endif /* INET6 */
362 static int	bridge_fragment(struct ifnet *, struct mbuf **mp,
363 		    struct ether_header *, int, struct llc *);
364 static void	bridge_linkstate(struct ifnet *ifp);
365 static void	bridge_linkcheck(struct bridge_softc *sc);
366 
367 
368 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
369 #define	VLANTAGOF(_m)	\
370     (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1
371 
372 static struct bstp_cb_ops bridge_ops = {
373 	.bcb_state = bridge_state_change,
374 	.bcb_rtage = bridge_rtable_expire
375 };
376 
377 SYSCTL_DECL(_net_link);
378 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
379     "Bridge");
380 
381 /* only pass IP[46] packets when pfil is enabled */
382 VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
383 #define	V_pfil_onlyip	VNET(pfil_onlyip)
384 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
385     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
386     "Only pass IP packets when pfil is enabled");
387 
388 /* run pfil hooks on the bridge interface */
389 VNET_DEFINE_STATIC(int, pfil_bridge) = 1;
390 #define	V_pfil_bridge	VNET(pfil_bridge)
391 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
392     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
393     "Packet filter on the bridge interface");
394 
395 /* layer2 filter with ipfw */
396 VNET_DEFINE_STATIC(int, pfil_ipfw);
397 #define	V_pfil_ipfw	VNET(pfil_ipfw)
398 
399 /* layer2 ARP filter with ipfw */
400 VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
401 #define	V_pfil_ipfw_arp	VNET(pfil_ipfw_arp)
402 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
403     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
404     "Filter ARP packets through IPFW layer2");
405 
406 /* run pfil hooks on the member interface */
407 VNET_DEFINE_STATIC(int, pfil_member) = 1;
408 #define	V_pfil_member	VNET(pfil_member)
409 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
410     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
411     "Packet filter on the member interface");
412 
413 /* run pfil hooks on the physical interface for locally destined packets */
414 VNET_DEFINE_STATIC(int, pfil_local_phys);
415 #define	V_pfil_local_phys	VNET(pfil_local_phys)
416 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
417     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
418     "Packet filter on the physical interface for locally destined packets");
419 
420 /* log STP state changes */
421 VNET_DEFINE_STATIC(int, log_stp);
422 #define	V_log_stp	VNET(log_stp)
423 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
424     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
425     "Log STP state changes");
426 
427 /* share MAC with first bridge member */
428 VNET_DEFINE_STATIC(int, bridge_inherit_mac);
429 #define	V_bridge_inherit_mac	VNET(bridge_inherit_mac)
430 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
431     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
432     "Inherit MAC address from the first bridge member");
433 
434 VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
435 #define	V_allow_llz_overlap	VNET(allow_llz_overlap)
436 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
437     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
438     "Allow overlap of link-local scope "
439     "zones of a bridge interface and the member interfaces");
440 
441 struct bridge_control {
442 	int	(*bc_func)(struct bridge_softc *, void *);
443 	int	bc_argsize;
444 	int	bc_flags;
445 };
446 
447 #define	BC_F_COPYIN		0x01	/* copy arguments in */
448 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
449 #define	BC_F_SUSER		0x04	/* do super-user check */
450 
451 const struct bridge_control bridge_control_table[] = {
452 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
453 	  BC_F_COPYIN|BC_F_SUSER },
454 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
455 	  BC_F_COPYIN|BC_F_SUSER },
456 
457 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
458 	  BC_F_COPYIN|BC_F_COPYOUT },
459 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
460 	  BC_F_COPYIN|BC_F_SUSER },
461 
462 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
463 	  BC_F_COPYIN|BC_F_SUSER },
464 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
465 	  BC_F_COPYOUT },
466 
467 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
468 	  BC_F_COPYIN|BC_F_COPYOUT },
469 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
470 	  BC_F_COPYIN|BC_F_COPYOUT },
471 
472 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
473 	  BC_F_COPYIN|BC_F_SUSER },
474 
475 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
476 	  BC_F_COPYIN|BC_F_SUSER },
477 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
478 	  BC_F_COPYOUT },
479 
480 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
481 	  BC_F_COPYIN|BC_F_SUSER },
482 
483 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
484 	  BC_F_COPYIN|BC_F_SUSER },
485 
486 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
487 	  BC_F_COPYOUT },
488 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
489 	  BC_F_COPYIN|BC_F_SUSER },
490 
491 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
492 	  BC_F_COPYOUT },
493 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
494 	  BC_F_COPYIN|BC_F_SUSER },
495 
496 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
497 	  BC_F_COPYOUT },
498 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
499 	  BC_F_COPYIN|BC_F_SUSER },
500 
501 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
502 	  BC_F_COPYOUT },
503 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
504 	  BC_F_COPYIN|BC_F_SUSER },
505 
506 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
507 	  BC_F_COPYIN|BC_F_SUSER },
508 
509 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
510 	  BC_F_COPYIN|BC_F_SUSER },
511 
512 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
513 	  BC_F_COPYIN|BC_F_SUSER },
514 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
515 	  BC_F_COPYIN|BC_F_SUSER },
516 
517 	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
518 	  BC_F_COPYOUT },
519 
520 	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
521 	  BC_F_COPYOUT },
522 
523 	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
524 	  BC_F_COPYIN|BC_F_COPYOUT },
525 
526 	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
527 	  BC_F_COPYIN|BC_F_SUSER },
528 
529 	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
530 	  BC_F_COPYIN|BC_F_SUSER },
531 
532 	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
533 	  BC_F_COPYIN|BC_F_SUSER },
534 
535 };
536 const int bridge_control_table_size = nitems(bridge_control_table);
537 
538 VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list);
539 #define	V_bridge_list	VNET(bridge_list)
540 #define	BRIDGE_LIST_LOCK_INIT(x)	mtx_init(&V_bridge_list_mtx,	\
541 					    "if_bridge list", NULL, MTX_DEF)
542 #define	BRIDGE_LIST_LOCK_DESTROY(x)	mtx_destroy(&V_bridge_list_mtx)
543 #define	BRIDGE_LIST_LOCK(x)		mtx_lock(&V_bridge_list_mtx)
544 #define	BRIDGE_LIST_UNLOCK(x)		mtx_unlock(&V_bridge_list_mtx)
545 
546 VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
547 #define	V_bridge_cloner	VNET(bridge_cloner)
548 
549 static const char bridge_name[] = "bridge";
550 
551 static void
552 vnet_bridge_init(const void *unused __unused)
553 {
554 
555 	V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
556 	    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
557 	    UMA_ALIGN_PTR, 0);
558 	BRIDGE_LIST_LOCK_INIT();
559 	LIST_INIT(&V_bridge_list);
560 	V_bridge_cloner = if_clone_simple(bridge_name,
561 	    bridge_clone_create, bridge_clone_destroy, 0);
562 }
563 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
564     vnet_bridge_init, NULL);
565 
566 static void
567 vnet_bridge_uninit(const void *unused __unused)
568 {
569 
570 	if_clone_detach(V_bridge_cloner);
571 	V_bridge_cloner = NULL;
572 	BRIDGE_LIST_LOCK_DESTROY();
573 
574 	/* Callbacks may use the UMA zone. */
575 	epoch_drain_callbacks(net_epoch_preempt);
576 
577 	uma_zdestroy(V_bridge_rtnode_zone);
578 }
579 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
580     vnet_bridge_uninit, NULL);
581 
582 static int
583 bridge_modevent(module_t mod, int type, void *data)
584 {
585 
586 	switch (type) {
587 	case MOD_LOAD:
588 		bridge_dn_p = bridge_dummynet;
589 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
590 		    ifnet_departure_event, bridge_ifdetach, NULL,
591 		    EVENTHANDLER_PRI_ANY);
592 		break;
593 	case MOD_UNLOAD:
594 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
595 		    bridge_detach_cookie);
596 		bridge_dn_p = NULL;
597 		break;
598 	default:
599 		return (EOPNOTSUPP);
600 	}
601 	return (0);
602 }
603 
604 static moduledata_t bridge_mod = {
605 	"if_bridge",
606 	bridge_modevent,
607 	0
608 };
609 
610 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
611 MODULE_VERSION(if_bridge, 1);
612 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
613 
614 /*
615  * handler for net.link.bridge.ipfw
616  */
617 static int
618 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
619 {
620 	int enable = V_pfil_ipfw;
621 	int error;
622 
623 	error = sysctl_handle_int(oidp, &enable, 0, req);
624 	enable &= 1;
625 
626 	if (enable != V_pfil_ipfw) {
627 		V_pfil_ipfw = enable;
628 
629 		/*
630 		 * Disable pfil so that ipfw doesnt run twice, if the user
631 		 * really wants both then they can re-enable pfil_bridge and/or
632 		 * pfil_member. Also allow non-ip packets as ipfw can filter by
633 		 * layer2 type.
634 		 */
635 		if (V_pfil_ipfw) {
636 			V_pfil_onlyip = 0;
637 			V_pfil_bridge = 0;
638 			V_pfil_member = 0;
639 		}
640 	}
641 
642 	return (error);
643 }
644 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
645     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
646     &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
647     "Layer2 filter with IPFW");
648 
649 /*
650  * bridge_clone_create:
651  *
652  *	Create a new bridge instance.
653  */
654 static int
655 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
656 {
657 	struct bridge_softc *sc;
658 	struct ifnet *ifp;
659 
660 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
661 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
662 	if (ifp == NULL) {
663 		free(sc, M_DEVBUF);
664 		return (ENOSPC);
665 	}
666 
667 	BRIDGE_LOCK_INIT(sc);
668 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
669 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
670 
671 	/* Initialize our routing table. */
672 	bridge_rtable_init(sc);
673 
674 	callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);
675 
676 	CK_LIST_INIT(&sc->sc_iflist);
677 	CK_LIST_INIT(&sc->sc_spanlist);
678 
679 	ifp->if_softc = sc;
680 	if_initname(ifp, bridge_name, unit);
681 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
682 	ifp->if_ioctl = bridge_ioctl;
683 	ifp->if_transmit = bridge_transmit;
684 	ifp->if_qflush = bridge_qflush;
685 	ifp->if_init = bridge_init;
686 	ifp->if_type = IFT_BRIDGE;
687 
688 	ether_gen_addr(ifp, &sc->sc_defaddr);
689 
690 	bstp_attach(&sc->sc_stp, &bridge_ops);
691 	ether_ifattach(ifp, sc->sc_defaddr.octet);
692 	/* Now undo some of the damage... */
693 	ifp->if_baudrate = 0;
694 	ifp->if_type = IFT_BRIDGE;
695 
696 	BRIDGE_LIST_LOCK();
697 	LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
698 	BRIDGE_LIST_UNLOCK();
699 
700 	return (0);
701 }
702 
703 static void
704 bridge_clone_destroy_cb(struct epoch_context *ctx)
705 {
706 	struct bridge_softc *sc;
707 
708 	sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
709 
710 	BRIDGE_LOCK_DESTROY(sc);
711 	free(sc, M_DEVBUF);
712 }
713 
714 /*
715  * bridge_clone_destroy:
716  *
717  *	Destroy a bridge instance.
718  */
719 static void
720 bridge_clone_destroy(struct ifnet *ifp)
721 {
722 	struct bridge_softc *sc = ifp->if_softc;
723 	struct bridge_iflist *bif;
724 	struct epoch_tracker et;
725 
726 	NET_EPOCH_ENTER(et);
727 	BRIDGE_LOCK(sc);
728 
729 	bridge_stop(ifp, 1);
730 	ifp->if_flags &= ~IFF_UP;
731 
732 	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
733 		bridge_delete_member(sc, bif, 0);
734 
735 	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
736 		bridge_delete_span(sc, bif);
737 	}
738 
739 	/* Tear down the routing table. */
740 	bridge_rtable_fini(sc);
741 
742 	BRIDGE_UNLOCK(sc);
743 
744 	callout_drain(&sc->sc_brcallout);
745 
746 	BRIDGE_LIST_LOCK();
747 	LIST_REMOVE(sc, sc_list);
748 	BRIDGE_LIST_UNLOCK();
749 
750 	bstp_detach(&sc->sc_stp);
751 	NET_EPOCH_EXIT(et);
752 
753 	ether_ifdetach(ifp);
754 	if_free(ifp);
755 
756 	NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
757 }
758 
759 /*
760  * bridge_ioctl:
761  *
762  *	Handle a control request from the operator.
763  */
764 static int
765 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
766 {
767 	struct bridge_softc *sc = ifp->if_softc;
768 	struct ifreq *ifr = (struct ifreq *)data;
769 	struct bridge_iflist *bif;
770 	struct thread *td = curthread;
771 	union {
772 		struct ifbreq ifbreq;
773 		struct ifbifconf ifbifconf;
774 		struct ifbareq ifbareq;
775 		struct ifbaconf ifbaconf;
776 		struct ifbrparam ifbrparam;
777 		struct ifbropreq ifbropreq;
778 	} args;
779 	struct ifdrv *ifd = (struct ifdrv *) data;
780 	const struct bridge_control *bc;
781 	int error = 0, oldmtu;
782 	struct epoch_tracker et;
783 
784 	NET_EPOCH_ENTER(et);
785 
786 	switch (cmd) {
787 
788 	case SIOCADDMULTI:
789 	case SIOCDELMULTI:
790 		break;
791 
792 	case SIOCGDRVSPEC:
793 	case SIOCSDRVSPEC:
794 		if (ifd->ifd_cmd >= bridge_control_table_size) {
795 			error = EINVAL;
796 			break;
797 		}
798 		bc = &bridge_control_table[ifd->ifd_cmd];
799 
800 		if (cmd == SIOCGDRVSPEC &&
801 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
802 			error = EINVAL;
803 			break;
804 		}
805 		else if (cmd == SIOCSDRVSPEC &&
806 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
807 			error = EINVAL;
808 			break;
809 		}
810 
811 		if (bc->bc_flags & BC_F_SUSER) {
812 			error = priv_check(td, PRIV_NET_BRIDGE);
813 			if (error)
814 				break;
815 		}
816 
817 		if (ifd->ifd_len != bc->bc_argsize ||
818 		    ifd->ifd_len > sizeof(args)) {
819 			error = EINVAL;
820 			break;
821 		}
822 
823 		bzero(&args, sizeof(args));
824 		if (bc->bc_flags & BC_F_COPYIN) {
825 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
826 			if (error)
827 				break;
828 		}
829 
830 		oldmtu = ifp->if_mtu;
831 		BRIDGE_LOCK(sc);
832 		error = (*bc->bc_func)(sc, &args);
833 		BRIDGE_UNLOCK(sc);
834 		if (error)
835 			break;
836 
837 		/*
838 		 * Bridge MTU may change during addition of the first port.
839 		 * If it did, do network layer specific procedure.
840 		 */
841 		if (ifp->if_mtu != oldmtu) {
842 #ifdef INET6
843 			nd6_setmtu(ifp);
844 #endif
845 			rt_updatemtu(ifp);
846 		}
847 
848 		if (bc->bc_flags & BC_F_COPYOUT)
849 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
850 
851 		break;
852 
853 	case SIOCSIFFLAGS:
854 		if (!(ifp->if_flags & IFF_UP) &&
855 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
856 			/*
857 			 * If interface is marked down and it is running,
858 			 * then stop and disable it.
859 			 */
860 			BRIDGE_LOCK(sc);
861 			bridge_stop(ifp, 1);
862 			BRIDGE_UNLOCK(sc);
863 		} else if ((ifp->if_flags & IFF_UP) &&
864 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
865 			/*
866 			 * If interface is marked up and it is stopped, then
867 			 * start it.
868 			 */
869 			(*ifp->if_init)(sc);
870 		}
871 		break;
872 
873 	case SIOCSIFMTU:
874 		if (ifr->ifr_mtu < 576) {
875 			error = EINVAL;
876 			break;
877 		}
878 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
879 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
880 			break;
881 		}
882 		BRIDGE_LOCK(sc);
883 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
884 			if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) {
885 				log(LOG_NOTICE, "%s: invalid MTU: %u(%s)"
886 				    " != %d\n", sc->sc_ifp->if_xname,
887 				    bif->bif_ifp->if_mtu,
888 				    bif->bif_ifp->if_xname, ifr->ifr_mtu);
889 				error = EINVAL;
890 				break;
891 			}
892 		}
893 		if (!error)
894 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
895 		BRIDGE_UNLOCK(sc);
896 		break;
897 	default:
898 		/*
899 		 * drop the lock as ether_ioctl() will call bridge_start() and
900 		 * cause the lock to be recursed.
901 		 */
902 		error = ether_ioctl(ifp, cmd, data);
903 		break;
904 	}
905 
906 	NET_EPOCH_EXIT(et);
907 
908 	return (error);
909 }
910 
911 /*
912  * bridge_mutecaps:
913  *
914  *	Clear or restore unwanted capabilities on the member interface
915  */
916 static void
917 bridge_mutecaps(struct bridge_softc *sc)
918 {
919 	struct bridge_iflist *bif;
920 	int enabled, mask;
921 
922 	BRIDGE_LOCK_ASSERT(sc);
923 
924 	/* Initial bitmask of capabilities to test */
925 	mask = BRIDGE_IFCAPS_MASK;
926 
927 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
928 		/* Every member must support it or its disabled */
929 		mask &= bif->bif_savedcaps;
930 	}
931 
932 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
933 		enabled = bif->bif_ifp->if_capenable;
934 		enabled &= ~BRIDGE_IFCAPS_STRIP;
935 		/* strip off mask bits and enable them again if allowed */
936 		enabled &= ~BRIDGE_IFCAPS_MASK;
937 		enabled |= mask;
938 		BRIDGE_UNLOCK(sc);
939 		bridge_set_ifcap(sc, bif, enabled);
940 		BRIDGE_LOCK(sc);
941 	}
942 }
943 
944 static void
945 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
946 {
947 	struct ifnet *ifp = bif->bif_ifp;
948 	struct ifreq ifr;
949 	int error, mask, stuck;
950 
951 	BRIDGE_UNLOCK_ASSERT(sc);
952 
953 	bzero(&ifr, sizeof(ifr));
954 	ifr.ifr_reqcap = set;
955 
956 	if (ifp->if_capenable != set) {
957 		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
958 		if (error)
959 			if_printf(sc->sc_ifp,
960 			    "error setting capabilities on %s: %d\n",
961 			    ifp->if_xname, error);
962 		mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
963 		stuck = ifp->if_capenable & mask & ~set;
964 		if (stuck != 0)
965 			if_printf(sc->sc_ifp,
966 			    "can't disable some capabilities on %s: 0x%x\n",
967 			    ifp->if_xname, stuck);
968 	}
969 }
970 
971 /*
972  * bridge_lookup_member:
973  *
974  *	Lookup a bridge member interface.
975  */
976 static struct bridge_iflist *
977 bridge_lookup_member(struct bridge_softc *sc, const char *name)
978 {
979 	struct bridge_iflist *bif;
980 	struct ifnet *ifp;
981 
982 	NET_EPOCH_ASSERT();
983 
984 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
985 		ifp = bif->bif_ifp;
986 		if (strcmp(ifp->if_xname, name) == 0)
987 			return (bif);
988 	}
989 
990 	return (NULL);
991 }
992 
993 /*
994  * bridge_lookup_member_if:
995  *
996  *	Lookup a bridge member interface by ifnet*.
997  */
998 static struct bridge_iflist *
999 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1000 {
1001 	struct bridge_iflist *bif;
1002 
1003 	NET_EPOCH_ASSERT();
1004 
1005 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1006 		if (bif->bif_ifp == member_ifp)
1007 			return (bif);
1008 	}
1009 
1010 	return (NULL);
1011 }
1012 
1013 static void
1014 bridge_delete_member_cb(struct epoch_context *ctx)
1015 {
1016 	struct bridge_iflist *bif;
1017 
1018 	bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1019 
1020 	free(bif, M_DEVBUF);
1021 }
1022 
1023 /*
1024  * bridge_delete_member:
1025  *
1026  *	Delete the specified member interface.
1027  */
1028 static void
1029 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1030     int gone)
1031 {
1032 	struct ifnet *ifs = bif->bif_ifp;
1033 	struct ifnet *fif = NULL;
1034 	struct bridge_iflist *bifl;
1035 
1036 	BRIDGE_LOCK_ASSERT(sc);
1037 
1038 	if (bif->bif_flags & IFBIF_STP)
1039 		bstp_disable(&bif->bif_stp);
1040 
1041 	ifs->if_bridge = NULL;
1042 	CK_LIST_REMOVE(bif, bif_next);
1043 
1044 	/*
1045 	 * If removing the interface that gave the bridge its mac address, set
1046 	 * the mac address of the bridge to the address of the next member, or
1047 	 * to its default address if no members are left.
1048 	 */
1049 	if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1050 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1051 			bcopy(&sc->sc_defaddr,
1052 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1053 			sc->sc_ifaddr = NULL;
1054 		} else {
1055 			bifl = CK_LIST_FIRST(&sc->sc_iflist);
1056 			fif = bifl->bif_ifp;
1057 			bcopy(IF_LLADDR(fif),
1058 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1059 			sc->sc_ifaddr = fif;
1060 		}
1061 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1062 	}
1063 
1064 	bridge_linkcheck(sc);
1065 	bridge_mutecaps(sc);	/* recalcuate now this interface is removed */
1066 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1067 	KASSERT(bif->bif_addrcnt == 0,
1068 	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1069 
1070 	ifs->if_bridge_output = NULL;
1071 	ifs->if_bridge_input = NULL;
1072 	ifs->if_bridge_linkstate = NULL;
1073 	BRIDGE_UNLOCK(sc);
1074 	if (!gone) {
1075 		switch (ifs->if_type) {
1076 		case IFT_ETHER:
1077 		case IFT_L2VLAN:
1078 			/*
1079 			 * Take the interface out of promiscuous mode, but only
1080 			 * if it was promiscuous in the first place. It might
1081 			 * not be if we're in the bridge_ioctl_add() error path.
1082 			 */
1083 			if (ifs->if_flags & IFF_PROMISC)
1084 				(void) ifpromisc(ifs, 0);
1085 			break;
1086 
1087 		case IFT_GIF:
1088 			break;
1089 
1090 		default:
1091 #ifdef DIAGNOSTIC
1092 			panic("bridge_delete_member: impossible");
1093 #endif
1094 			break;
1095 		}
1096 		/* reneable any interface capabilities */
1097 		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1098 	}
1099 	bstp_destroy(&bif->bif_stp);	/* prepare to free */
1100 	BRIDGE_LOCK(sc);
1101 
1102 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1103 }
1104 
1105 /*
1106  * bridge_delete_span:
1107  *
1108  *	Delete the specified span interface.
1109  */
1110 static void
1111 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1112 {
1113 	BRIDGE_LOCK_ASSERT(sc);
1114 
1115 	KASSERT(bif->bif_ifp->if_bridge == NULL,
1116 	    ("%s: not a span interface", __func__));
1117 
1118 	CK_LIST_REMOVE(bif, bif_next);
1119 
1120 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1121 }
1122 
1123 static int
1124 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1125 {
1126 	struct ifbreq *req = arg;
1127 	struct bridge_iflist *bif = NULL;
1128 	struct ifnet *ifs;
1129 	int error = 0;
1130 
1131 	ifs = ifunit(req->ifbr_ifsname);
1132 	if (ifs == NULL)
1133 		return (ENOENT);
1134 	if (ifs->if_ioctl == NULL)	/* must be supported */
1135 		return (EINVAL);
1136 
1137 	/* If it's in the span list, it can't be a member. */
1138 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1139 		if (ifs == bif->bif_ifp)
1140 			return (EBUSY);
1141 
1142 	if (ifs->if_bridge == sc)
1143 		return (EEXIST);
1144 
1145 	if (ifs->if_bridge != NULL)
1146 		return (EBUSY);
1147 
1148 	switch (ifs->if_type) {
1149 	case IFT_ETHER:
1150 	case IFT_L2VLAN:
1151 	case IFT_GIF:
1152 		/* permitted interface types */
1153 		break;
1154 	default:
1155 		return (EINVAL);
1156 	}
1157 
1158 #ifdef INET6
1159 	/*
1160 	 * Two valid inet6 addresses with link-local scope must not be
1161 	 * on the parent interface and the member interfaces at the
1162 	 * same time.  This restriction is needed to prevent violation
1163 	 * of link-local scope zone.  Attempts to add a member
1164 	 * interface which has inet6 addresses when the parent has
1165 	 * inet6 triggers removal of all inet6 addresses on the member
1166 	 * interface.
1167 	 */
1168 
1169 	/* Check if the parent interface has a link-local scope addr. */
1170 	if (V_allow_llz_overlap == 0 &&
1171 	    in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1172 		/*
1173 		 * If any, remove all inet6 addresses from the member
1174 		 * interfaces.
1175 		 */
1176 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1177  			if (in6ifa_llaonifp(bif->bif_ifp)) {
1178 				BRIDGE_UNLOCK(sc);
1179 				in6_ifdetach(bif->bif_ifp);
1180 				BRIDGE_LOCK(sc);
1181 				if_printf(sc->sc_ifp,
1182 				    "IPv6 addresses on %s have been removed "
1183 				    "before adding it as a member to prevent "
1184 				    "IPv6 address scope violation.\n",
1185 				    bif->bif_ifp->if_xname);
1186 			}
1187 		}
1188 		if (in6ifa_llaonifp(ifs)) {
1189 			BRIDGE_UNLOCK(sc);
1190 			in6_ifdetach(ifs);
1191 			BRIDGE_LOCK(sc);
1192 			if_printf(sc->sc_ifp,
1193 			    "IPv6 addresses on %s have been removed "
1194 			    "before adding it as a member to prevent "
1195 			    "IPv6 address scope violation.\n",
1196 			    ifs->if_xname);
1197 		}
1198 	}
1199 #endif
1200 	/* Allow the first Ethernet member to define the MTU */
1201 	if (CK_LIST_EMPTY(&sc->sc_iflist))
1202 		sc->sc_ifp->if_mtu = ifs->if_mtu;
1203 	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1204 		if_printf(sc->sc_ifp, "invalid MTU: %u(%s) != %u\n",
1205 		    ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu);
1206 		return (EINVAL);
1207 	}
1208 
1209 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1210 	if (bif == NULL)
1211 		return (ENOMEM);
1212 
1213 	bif->bif_ifp = ifs;
1214 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1215 	bif->bif_savedcaps = ifs->if_capenable;
1216 
1217 	/*
1218 	 * Assign the interface's MAC address to the bridge if it's the first
1219 	 * member and the MAC address of the bridge has not been changed from
1220 	 * the default randomly generated one.
1221 	 */
1222 	if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1223 	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1224 		bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1225 		sc->sc_ifaddr = ifs;
1226 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1227 	}
1228 
1229 	ifs->if_bridge = sc;
1230 	ifs->if_bridge_output = bridge_output;
1231 	ifs->if_bridge_input = bridge_input;
1232 	ifs->if_bridge_linkstate = bridge_linkstate;
1233 	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1234 	/*
1235 	 * XXX: XLOCK HERE!?!
1236 	 *
1237 	 * NOTE: insert_***HEAD*** should be safe for the traversals.
1238 	 */
1239 	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1240 
1241 	/* Set interface capabilities to the intersection set of all members */
1242 	bridge_mutecaps(sc);
1243 	bridge_linkcheck(sc);
1244 
1245 	/* Place the interface into promiscuous mode */
1246 	switch (ifs->if_type) {
1247 		case IFT_ETHER:
1248 		case IFT_L2VLAN:
1249 			BRIDGE_UNLOCK(sc);
1250 			error = ifpromisc(ifs, 1);
1251 			BRIDGE_LOCK(sc);
1252 			break;
1253 	}
1254 
1255 	if (error)
1256 		bridge_delete_member(sc, bif, 0);
1257 	return (error);
1258 }
1259 
1260 static int
1261 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1262 {
1263 	struct ifbreq *req = arg;
1264 	struct bridge_iflist *bif;
1265 
1266 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1267 	if (bif == NULL)
1268 		return (ENOENT);
1269 
1270 	bridge_delete_member(sc, bif, 0);
1271 
1272 	return (0);
1273 }
1274 
1275 static int
1276 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1277 {
1278 	struct ifbreq *req = arg;
1279 	struct bridge_iflist *bif;
1280 	struct bstp_port *bp;
1281 
1282 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1283 	if (bif == NULL)
1284 		return (ENOENT);
1285 
1286 	bp = &bif->bif_stp;
1287 	req->ifbr_ifsflags = bif->bif_flags;
1288 	req->ifbr_state = bp->bp_state;
1289 	req->ifbr_priority = bp->bp_priority;
1290 	req->ifbr_path_cost = bp->bp_path_cost;
1291 	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1292 	req->ifbr_proto = bp->bp_protover;
1293 	req->ifbr_role = bp->bp_role;
1294 	req->ifbr_stpflags = bp->bp_flags;
1295 	req->ifbr_addrcnt = bif->bif_addrcnt;
1296 	req->ifbr_addrmax = bif->bif_addrmax;
1297 	req->ifbr_addrexceeded = bif->bif_addrexceeded;
1298 
1299 	/* Copy STP state options as flags */
1300 	if (bp->bp_operedge)
1301 		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1302 	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1303 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1304 	if (bp->bp_ptp_link)
1305 		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1306 	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1307 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1308 	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1309 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1310 	if (bp->bp_flags & BSTP_PORT_ADMCOST)
1311 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1312 	return (0);
1313 }
1314 
1315 static int
1316 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1317 {
1318 	struct ifbreq *req = arg;
1319 	struct bridge_iflist *bif;
1320 	struct bstp_port *bp;
1321 	int error;
1322 
1323 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1324 	if (bif == NULL)
1325 		return (ENOENT);
1326 	bp = &bif->bif_stp;
1327 
1328 	if (req->ifbr_ifsflags & IFBIF_SPAN)
1329 		/* SPAN is readonly */
1330 		return (EINVAL);
1331 
1332 	if (req->ifbr_ifsflags & IFBIF_STP) {
1333 		if ((bif->bif_flags & IFBIF_STP) == 0) {
1334 			error = bstp_enable(&bif->bif_stp);
1335 			if (error)
1336 				return (error);
1337 		}
1338 	} else {
1339 		if ((bif->bif_flags & IFBIF_STP) != 0)
1340 			bstp_disable(&bif->bif_stp);
1341 	}
1342 
1343 	/* Pass on STP flags */
1344 	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1345 	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1346 	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1347 	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1348 
1349 	/* Save the bits relating to the bridge */
1350 	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1351 
1352 	return (0);
1353 }
1354 
1355 static int
1356 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1357 {
1358 	struct ifbrparam *param = arg;
1359 
1360 	sc->sc_brtmax = param->ifbrp_csize;
1361 	bridge_rttrim(sc);
1362 
1363 	return (0);
1364 }
1365 
1366 static int
1367 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1368 {
1369 	struct ifbrparam *param = arg;
1370 
1371 	param->ifbrp_csize = sc->sc_brtmax;
1372 
1373 	return (0);
1374 }
1375 
1376 static int
1377 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1378 {
1379 	struct ifbifconf *bifc = arg;
1380 	struct bridge_iflist *bif;
1381 	struct ifbreq breq;
1382 	char *buf, *outbuf;
1383 	int count, buflen, len, error = 0;
1384 
1385 	count = 0;
1386 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1387 		count++;
1388 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1389 		count++;
1390 
1391 	buflen = sizeof(breq) * count;
1392 	if (bifc->ifbic_len == 0) {
1393 		bifc->ifbic_len = buflen;
1394 		return (0);
1395 	}
1396 	BRIDGE_UNLOCK(sc);
1397 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1398 	BRIDGE_LOCK(sc);
1399 
1400 	count = 0;
1401 	buf = outbuf;
1402 	len = min(bifc->ifbic_len, buflen);
1403 	bzero(&breq, sizeof(breq));
1404 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1405 		if (len < sizeof(breq))
1406 			break;
1407 
1408 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1409 		    sizeof(breq.ifbr_ifsname));
1410 		/* Fill in the ifbreq structure */
1411 		error = bridge_ioctl_gifflags(sc, &breq);
1412 		if (error)
1413 			break;
1414 		memcpy(buf, &breq, sizeof(breq));
1415 		count++;
1416 		buf += sizeof(breq);
1417 		len -= sizeof(breq);
1418 	}
1419 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1420 		if (len < sizeof(breq))
1421 			break;
1422 
1423 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1424 		    sizeof(breq.ifbr_ifsname));
1425 		breq.ifbr_ifsflags = bif->bif_flags;
1426 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1427 		memcpy(buf, &breq, sizeof(breq));
1428 		count++;
1429 		buf += sizeof(breq);
1430 		len -= sizeof(breq);
1431 	}
1432 
1433 	BRIDGE_UNLOCK(sc);
1434 	bifc->ifbic_len = sizeof(breq) * count;
1435 	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1436 	BRIDGE_LOCK(sc);
1437 	free(outbuf, M_TEMP);
1438 	return (error);
1439 }
1440 
1441 static int
1442 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1443 {
1444 	struct ifbaconf *bac = arg;
1445 	struct bridge_rtnode *brt;
1446 	struct ifbareq bareq;
1447 	char *buf, *outbuf;
1448 	int count, buflen, len, error = 0;
1449 
1450 	if (bac->ifbac_len == 0)
1451 		return (0);
1452 
1453 	count = 0;
1454 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1455 		count++;
1456 	buflen = sizeof(bareq) * count;
1457 
1458 	BRIDGE_UNLOCK(sc);
1459 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1460 	BRIDGE_LOCK(sc);
1461 
1462 	count = 0;
1463 	buf = outbuf;
1464 	len = min(bac->ifbac_len, buflen);
1465 	bzero(&bareq, sizeof(bareq));
1466 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1467 		if (len < sizeof(bareq))
1468 			goto out;
1469 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1470 		    sizeof(bareq.ifba_ifsname));
1471 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1472 		bareq.ifba_vlan = brt->brt_vlan;
1473 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1474 				time_uptime < brt->brt_expire)
1475 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1476 		else
1477 			bareq.ifba_expire = 0;
1478 		bareq.ifba_flags = brt->brt_flags;
1479 
1480 		memcpy(buf, &bareq, sizeof(bareq));
1481 		count++;
1482 		buf += sizeof(bareq);
1483 		len -= sizeof(bareq);
1484 	}
1485 out:
1486 	BRIDGE_UNLOCK(sc);
1487 	bac->ifbac_len = sizeof(bareq) * count;
1488 	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1489 	BRIDGE_LOCK(sc);
1490 	free(outbuf, M_TEMP);
1491 	return (error);
1492 }
1493 
1494 static int
1495 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1496 {
1497 	struct ifbareq *req = arg;
1498 	struct bridge_iflist *bif;
1499 	int error;
1500 
1501 	NET_EPOCH_ASSERT();
1502 
1503 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1504 	if (bif == NULL)
1505 		return (ENOENT);
1506 
1507 	/* bridge_rtupdate() may acquire the lock. */
1508 	BRIDGE_UNLOCK(sc);
1509 	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1510 	    req->ifba_flags);
1511 	BRIDGE_LOCK(sc);
1512 
1513 	return (error);
1514 }
1515 
1516 static int
1517 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1518 {
1519 	struct ifbrparam *param = arg;
1520 
1521 	sc->sc_brttimeout = param->ifbrp_ctime;
1522 	return (0);
1523 }
1524 
1525 static int
1526 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1527 {
1528 	struct ifbrparam *param = arg;
1529 
1530 	param->ifbrp_ctime = sc->sc_brttimeout;
1531 	return (0);
1532 }
1533 
1534 static int
1535 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1536 {
1537 	struct ifbareq *req = arg;
1538 
1539 	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
1540 }
1541 
1542 static int
1543 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1544 {
1545 	struct ifbreq *req = arg;
1546 
1547 	bridge_rtflush(sc, req->ifbr_ifsflags);
1548 	return (0);
1549 }
1550 
1551 static int
1552 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1553 {
1554 	struct ifbrparam *param = arg;
1555 	struct bstp_state *bs = &sc->sc_stp;
1556 
1557 	param->ifbrp_prio = bs->bs_bridge_priority;
1558 	return (0);
1559 }
1560 
1561 static int
1562 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1563 {
1564 	struct ifbrparam *param = arg;
1565 
1566 	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1567 }
1568 
1569 static int
1570 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1571 {
1572 	struct ifbrparam *param = arg;
1573 	struct bstp_state *bs = &sc->sc_stp;
1574 
1575 	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1576 	return (0);
1577 }
1578 
1579 static int
1580 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1581 {
1582 	struct ifbrparam *param = arg;
1583 
1584 	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1585 }
1586 
1587 static int
1588 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1589 {
1590 	struct ifbrparam *param = arg;
1591 	struct bstp_state *bs = &sc->sc_stp;
1592 
1593 	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1594 	return (0);
1595 }
1596 
1597 static int
1598 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1599 {
1600 	struct ifbrparam *param = arg;
1601 
1602 	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1603 }
1604 
1605 static int
1606 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1607 {
1608 	struct ifbrparam *param = arg;
1609 	struct bstp_state *bs = &sc->sc_stp;
1610 
1611 	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1612 	return (0);
1613 }
1614 
1615 static int
1616 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1617 {
1618 	struct ifbrparam *param = arg;
1619 
1620 	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1621 }
1622 
1623 static int
1624 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1625 {
1626 	struct ifbreq *req = arg;
1627 	struct bridge_iflist *bif;
1628 
1629 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1630 	if (bif == NULL)
1631 		return (ENOENT);
1632 
1633 	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1634 }
1635 
1636 static int
1637 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1638 {
1639 	struct ifbreq *req = arg;
1640 	struct bridge_iflist *bif;
1641 
1642 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1643 	if (bif == NULL)
1644 		return (ENOENT);
1645 
1646 	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1647 }
1648 
1649 static int
1650 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1651 {
1652 	struct ifbreq *req = arg;
1653 	struct bridge_iflist *bif;
1654 
1655 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1656 	if (bif == NULL)
1657 		return (ENOENT);
1658 
1659 	bif->bif_addrmax = req->ifbr_addrmax;
1660 	return (0);
1661 }
1662 
1663 static int
1664 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1665 {
1666 	struct ifbreq *req = arg;
1667 	struct bridge_iflist *bif = NULL;
1668 	struct ifnet *ifs;
1669 
1670 	ifs = ifunit(req->ifbr_ifsname);
1671 	if (ifs == NULL)
1672 		return (ENOENT);
1673 
1674 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1675 		if (ifs == bif->bif_ifp)
1676 			return (EBUSY);
1677 
1678 	if (ifs->if_bridge != NULL)
1679 		return (EBUSY);
1680 
1681 	switch (ifs->if_type) {
1682 		case IFT_ETHER:
1683 		case IFT_GIF:
1684 		case IFT_L2VLAN:
1685 			break;
1686 		default:
1687 			return (EINVAL);
1688 	}
1689 
1690 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1691 	if (bif == NULL)
1692 		return (ENOMEM);
1693 
1694 	bif->bif_ifp = ifs;
1695 	bif->bif_flags = IFBIF_SPAN;
1696 
1697 	CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1698 
1699 	return (0);
1700 }
1701 
1702 static int
1703 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1704 {
1705 	struct ifbreq *req = arg;
1706 	struct bridge_iflist *bif;
1707 	struct ifnet *ifs;
1708 
1709 	ifs = ifunit(req->ifbr_ifsname);
1710 	if (ifs == NULL)
1711 		return (ENOENT);
1712 
1713 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1714 		if (ifs == bif->bif_ifp)
1715 			break;
1716 
1717 	if (bif == NULL)
1718 		return (ENOENT);
1719 
1720 	bridge_delete_span(sc, bif);
1721 
1722 	return (0);
1723 }
1724 
1725 static int
1726 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1727 {
1728 	struct ifbropreq *req = arg;
1729 	struct bstp_state *bs = &sc->sc_stp;
1730 	struct bstp_port *root_port;
1731 
1732 	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1733 	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1734 	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1735 
1736 	root_port = bs->bs_root_port;
1737 	if (root_port == NULL)
1738 		req->ifbop_root_port = 0;
1739 	else
1740 		req->ifbop_root_port = root_port->bp_ifp->if_index;
1741 
1742 	req->ifbop_holdcount = bs->bs_txholdcount;
1743 	req->ifbop_priority = bs->bs_bridge_priority;
1744 	req->ifbop_protocol = bs->bs_protover;
1745 	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1746 	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1747 	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1748 	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1749 	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1750 	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1751 
1752 	return (0);
1753 }
1754 
1755 static int
1756 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1757 {
1758 	struct ifbrparam *param = arg;
1759 
1760 	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1761 	return (0);
1762 }
1763 
1764 static int
1765 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1766 {
1767 	struct ifbpstpconf *bifstp = arg;
1768 	struct bridge_iflist *bif;
1769 	struct bstp_port *bp;
1770 	struct ifbpstpreq bpreq;
1771 	char *buf, *outbuf;
1772 	int count, buflen, len, error = 0;
1773 
1774 	count = 0;
1775 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1776 		if ((bif->bif_flags & IFBIF_STP) != 0)
1777 			count++;
1778 	}
1779 
1780 	buflen = sizeof(bpreq) * count;
1781 	if (bifstp->ifbpstp_len == 0) {
1782 		bifstp->ifbpstp_len = buflen;
1783 		return (0);
1784 	}
1785 
1786 	BRIDGE_UNLOCK(sc);
1787 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1788 	BRIDGE_LOCK(sc);
1789 
1790 	count = 0;
1791 	buf = outbuf;
1792 	len = min(bifstp->ifbpstp_len, buflen);
1793 	bzero(&bpreq, sizeof(bpreq));
1794 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1795 		if (len < sizeof(bpreq))
1796 			break;
1797 
1798 		if ((bif->bif_flags & IFBIF_STP) == 0)
1799 			continue;
1800 
1801 		bp = &bif->bif_stp;
1802 		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1803 		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1804 		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1805 		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1806 		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1807 		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1808 
1809 		memcpy(buf, &bpreq, sizeof(bpreq));
1810 		count++;
1811 		buf += sizeof(bpreq);
1812 		len -= sizeof(bpreq);
1813 	}
1814 
1815 	BRIDGE_UNLOCK(sc);
1816 	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1817 	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1818 	BRIDGE_LOCK(sc);
1819 	free(outbuf, M_TEMP);
1820 	return (error);
1821 }
1822 
1823 static int
1824 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
1825 {
1826 	struct ifbrparam *param = arg;
1827 
1828 	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
1829 }
1830 
1831 static int
1832 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
1833 {
1834 	struct ifbrparam *param = arg;
1835 
1836 	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
1837 }
1838 
1839 /*
1840  * bridge_ifdetach:
1841  *
1842  *	Detach an interface from a bridge.  Called when a member
1843  *	interface is detaching.
1844  */
1845 static void
1846 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1847 {
1848 	struct bridge_softc *sc = ifp->if_bridge;
1849 	struct bridge_iflist *bif;
1850 	struct epoch_tracker et;
1851 
1852 	if (ifp->if_flags & IFF_RENAMING)
1853 		return;
1854 	if (V_bridge_cloner == NULL) {
1855 		/*
1856 		 * This detach handler can be called after
1857 		 * vnet_bridge_uninit().  Just return in that case.
1858 		 */
1859 		return;
1860 	}
1861 	NET_EPOCH_ENTER(et);
1862 	/* Check if the interface is a bridge member */
1863 	if (sc != NULL) {
1864 		BRIDGE_LOCK(sc);
1865 
1866 		bif = bridge_lookup_member_if(sc, ifp);
1867 		if (bif != NULL)
1868 			bridge_delete_member(sc, bif, 1);
1869 
1870 		BRIDGE_UNLOCK(sc);
1871 		NET_EPOCH_EXIT(et);
1872 		return;
1873 	}
1874 
1875 	/* Check if the interface is a span port */
1876 	BRIDGE_LIST_LOCK();
1877 	LIST_FOREACH(sc, &V_bridge_list, sc_list) {
1878 		BRIDGE_LOCK(sc);
1879 		CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1880 			if (ifp == bif->bif_ifp) {
1881 				bridge_delete_span(sc, bif);
1882 				break;
1883 			}
1884 
1885 		BRIDGE_UNLOCK(sc);
1886 	}
1887 	BRIDGE_LIST_UNLOCK();
1888 	NET_EPOCH_EXIT(et);
1889 }
1890 
1891 /*
1892  * bridge_init:
1893  *
1894  *	Initialize a bridge interface.
1895  */
1896 static void
1897 bridge_init(void *xsc)
1898 {
1899 	struct bridge_softc *sc = (struct bridge_softc *)xsc;
1900 	struct ifnet *ifp = sc->sc_ifp;
1901 
1902 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1903 		return;
1904 
1905 	BRIDGE_LOCK(sc);
1906 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1907 	    bridge_timer, sc);
1908 
1909 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1910 	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
1911 
1912 	BRIDGE_UNLOCK(sc);
1913 }
1914 
1915 /*
1916  * bridge_stop:
1917  *
1918  *	Stop the bridge interface.
1919  */
1920 static void
1921 bridge_stop(struct ifnet *ifp, int disable)
1922 {
1923 	struct bridge_softc *sc = ifp->if_softc;
1924 
1925 	NET_EPOCH_ASSERT();
1926 	BRIDGE_LOCK_ASSERT(sc);
1927 
1928 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1929 		return;
1930 
1931 	callout_stop(&sc->sc_brcallout);
1932 	bstp_stop(&sc->sc_stp);
1933 
1934 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1935 
1936 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1937 }
1938 
1939 /*
1940  * bridge_enqueue:
1941  *
1942  *	Enqueue a packet on a bridge member interface.
1943  *
1944  */
1945 static int
1946 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
1947 {
1948 	int len, err = 0;
1949 	short mflags;
1950 	struct mbuf *m0;
1951 
1952 	/* We may be sending a fragment so traverse the mbuf */
1953 	for (; m; m = m0) {
1954 		m0 = m->m_nextpkt;
1955 		m->m_nextpkt = NULL;
1956 		len = m->m_pkthdr.len;
1957 		mflags = m->m_flags;
1958 
1959 		/*
1960 		 * If underlying interface can not do VLAN tag insertion itself
1961 		 * then attach a packet tag that holds it.
1962 		 */
1963 		if ((m->m_flags & M_VLANTAG) &&
1964 		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
1965 			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
1966 			if (m == NULL) {
1967 				if_printf(dst_ifp,
1968 				    "unable to prepend VLAN header\n");
1969 				if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
1970 				continue;
1971 			}
1972 			m->m_flags &= ~M_VLANTAG;
1973 		}
1974 
1975 		M_ASSERTPKTHDR(m); /* We shouldn't transmit mbuf without pkthdr */
1976 		if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
1977 			m_freem(m0);
1978 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
1979 			break;
1980 		}
1981 
1982 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
1983 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
1984 		if (mflags & M_MCAST)
1985 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
1986 	}
1987 
1988 	return (err);
1989 }
1990 
1991 /*
1992  * bridge_dummynet:
1993  *
1994  * 	Receive a queued packet from dummynet and pass it on to the output
1995  * 	interface.
1996  *
1997  *	The mbuf has the Ethernet header already attached.
1998  */
1999 static void
2000 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2001 {
2002 	struct bridge_softc *sc;
2003 
2004 	sc = ifp->if_bridge;
2005 
2006 	/*
2007 	 * The packet didnt originate from a member interface. This should only
2008 	 * ever happen if a member interface is removed while packets are
2009 	 * queued for it.
2010 	 */
2011 	if (sc == NULL) {
2012 		m_freem(m);
2013 		return;
2014 	}
2015 
2016 	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2017 #ifdef INET6
2018 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2019 #endif
2020 	    ) {
2021 		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2022 			return;
2023 		if (m == NULL)
2024 			return;
2025 	}
2026 
2027 	bridge_enqueue(sc, ifp, m);
2028 }
2029 
2030 /*
2031  * bridge_output:
2032  *
2033  *	Send output from a bridge member interface.  This
2034  *	performs the bridging function for locally originated
2035  *	packets.
2036  *
2037  *	The mbuf has the Ethernet header already attached.  We must
2038  *	enqueue or free the mbuf before returning.
2039  */
2040 static int
2041 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2042     struct rtentry *rt)
2043 {
2044 	struct ether_header *eh;
2045 	struct ifnet *bifp, *dst_if;
2046 	struct bridge_softc *sc;
2047 	uint16_t vlan;
2048 
2049 	NET_EPOCH_ASSERT();
2050 
2051 	if (m->m_len < ETHER_HDR_LEN) {
2052 		m = m_pullup(m, ETHER_HDR_LEN);
2053 		if (m == NULL)
2054 			return (0);
2055 	}
2056 
2057 	eh = mtod(m, struct ether_header *);
2058 	sc = ifp->if_bridge;
2059 	vlan = VLANTAGOF(m);
2060 
2061 	bifp = sc->sc_ifp;
2062 
2063 	/*
2064 	 * If bridge is down, but the original output interface is up,
2065 	 * go ahead and send out that interface.  Otherwise, the packet
2066 	 * is dropped below.
2067 	 */
2068 	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2069 		dst_if = ifp;
2070 		goto sendunicast;
2071 	}
2072 
2073 	/*
2074 	 * If the packet is a multicast, or we don't know a better way to
2075 	 * get there, send to all interfaces.
2076 	 */
2077 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
2078 		dst_if = NULL;
2079 	else
2080 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2081 	/* Tap any traffic not passing back out the originating interface */
2082 	if (dst_if != ifp)
2083 		ETHER_BPF_MTAP(bifp, m);
2084 	if (dst_if == NULL) {
2085 		struct bridge_iflist *bif;
2086 		struct mbuf *mc;
2087 		int used = 0;
2088 
2089 		bridge_span(sc, m);
2090 
2091 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2092 			dst_if = bif->bif_ifp;
2093 
2094 			if (dst_if->if_type == IFT_GIF)
2095 				continue;
2096 			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2097 				continue;
2098 
2099 			/*
2100 			 * If this is not the original output interface,
2101 			 * and the interface is participating in spanning
2102 			 * tree, make sure the port is in a state that
2103 			 * allows forwarding.
2104 			 */
2105 			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2106 			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2107 				continue;
2108 
2109 			if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2110 				used = 1;
2111 				mc = m;
2112 			} else {
2113 				mc = m_copypacket(m, M_NOWAIT);
2114 				if (mc == NULL) {
2115 					if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2116 					continue;
2117 				}
2118 			}
2119 
2120 			bridge_enqueue(sc, dst_if, mc);
2121 		}
2122 		if (used == 0)
2123 			m_freem(m);
2124 		return (0);
2125 	}
2126 
2127 sendunicast:
2128 	/*
2129 	 * XXX Spanning tree consideration here?
2130 	 */
2131 
2132 	bridge_span(sc, m);
2133 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2134 		m_freem(m);
2135 		return (0);
2136 	}
2137 
2138 	bridge_enqueue(sc, dst_if, m);
2139 	return (0);
2140 }
2141 
2142 /*
2143  * bridge_transmit:
2144  *
2145  *	Do output on a bridge.
2146  *
2147  */
2148 static int
2149 bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2150 {
2151 	struct bridge_softc *sc;
2152 	struct ether_header *eh;
2153 	struct ifnet *dst_if;
2154 	int error = 0;
2155 
2156 	sc = ifp->if_softc;
2157 
2158 	ETHER_BPF_MTAP(ifp, m);
2159 
2160 	eh = mtod(m, struct ether_header *);
2161 
2162 	if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2163 	    (dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1)) != NULL) {
2164 		error = bridge_enqueue(sc, dst_if, m);
2165 	} else
2166 		bridge_broadcast(sc, ifp, m, 0);
2167 
2168 	return (error);
2169 }
2170 
2171 /*
2172  * The ifp->if_qflush entry point for if_bridge(4) is no-op.
2173  */
2174 static void
2175 bridge_qflush(struct ifnet *ifp __unused)
2176 {
2177 }
2178 
2179 /*
2180  * bridge_forward:
2181  *
2182  *	The forwarding function of the bridge.
2183  *
2184  *	NOTE: Releases the lock on return.
2185  */
2186 static void
2187 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2188     struct mbuf *m)
2189 {
2190 	struct bridge_iflist *dbif;
2191 	struct ifnet *src_if, *dst_if, *ifp;
2192 	struct ether_header *eh;
2193 	uint16_t vlan;
2194 	uint8_t *dst;
2195 	int error;
2196 
2197 	NET_EPOCH_ASSERT();
2198 
2199 	src_if = m->m_pkthdr.rcvif;
2200 	ifp = sc->sc_ifp;
2201 
2202 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2203 	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2204 	vlan = VLANTAGOF(m);
2205 
2206 	if ((sbif->bif_flags & IFBIF_STP) &&
2207 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2208 		goto drop;
2209 
2210 	eh = mtod(m, struct ether_header *);
2211 	dst = eh->ether_dhost;
2212 
2213 	/* If the interface is learning, record the address. */
2214 	if (sbif->bif_flags & IFBIF_LEARNING) {
2215 		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2216 		    sbif, 0, IFBAF_DYNAMIC);
2217 		/*
2218 		 * If the interface has addresses limits then deny any source
2219 		 * that is not in the cache.
2220 		 */
2221 		if (error && sbif->bif_addrmax)
2222 			goto drop;
2223 	}
2224 
2225 	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2226 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2227 		goto drop;
2228 
2229 	/*
2230 	 * At this point, the port either doesn't participate
2231 	 * in spanning tree or it is in the forwarding state.
2232 	 */
2233 
2234 	/*
2235 	 * If the packet is unicast, destined for someone on
2236 	 * "this" side of the bridge, drop it.
2237 	 */
2238 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2239 		dst_if = bridge_rtlookup(sc, dst, vlan);
2240 		if (src_if == dst_if)
2241 			goto drop;
2242 	} else {
2243 		/*
2244 		 * Check if its a reserved multicast address, any address
2245 		 * listed in 802.1D section 7.12.6 may not be forwarded by the
2246 		 * bridge.
2247 		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
2248 		 */
2249 		if (dst[0] == 0x01 && dst[1] == 0x80 &&
2250 		    dst[2] == 0xc2 && dst[3] == 0x00 &&
2251 		    dst[4] == 0x00 && dst[5] <= 0x0f)
2252 			goto drop;
2253 
2254 		/* ...forward it to all interfaces. */
2255 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2256 		dst_if = NULL;
2257 	}
2258 
2259 	/*
2260 	 * If we have a destination interface which is a member of our bridge,
2261 	 * OR this is a unicast packet, push it through the bpf(4) machinery.
2262 	 * For broadcast or multicast packets, don't bother because it will
2263 	 * be reinjected into ether_input. We do this before we pass the packets
2264 	 * through the pfil(9) framework, as it is possible that pfil(9) will
2265 	 * drop the packet, or possibly modify it, making it difficult to debug
2266 	 * firewall issues on the bridge.
2267 	 */
2268 	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2269 		ETHER_BPF_MTAP(ifp, m);
2270 
2271 	/* run the packet filter */
2272 	if (PFIL_HOOKED_IN(V_inet_pfil_head)
2273 #ifdef INET6
2274 	    || PFIL_HOOKED_IN(V_inet6_pfil_head)
2275 #endif
2276 	    ) {
2277 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2278 			return;
2279 		if (m == NULL)
2280 			return;
2281 	}
2282 
2283 	if (dst_if == NULL) {
2284 		bridge_broadcast(sc, src_if, m, 1);
2285 		return;
2286 	}
2287 
2288 	/*
2289 	 * At this point, we're dealing with a unicast frame
2290 	 * going to a different interface.
2291 	 */
2292 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2293 		goto drop;
2294 
2295 	dbif = bridge_lookup_member_if(sc, dst_if);
2296 	if (dbif == NULL)
2297 		/* Not a member of the bridge (anymore?) */
2298 		goto drop;
2299 
2300 	/* Private segments can not talk to each other */
2301 	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2302 		goto drop;
2303 
2304 	if ((dbif->bif_flags & IFBIF_STP) &&
2305 	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2306 		goto drop;
2307 
2308 	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2309 #ifdef INET6
2310 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2311 #endif
2312 	    ) {
2313 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2314 			return;
2315 		if (m == NULL)
2316 			return;
2317 	}
2318 
2319 	bridge_enqueue(sc, dst_if, m);
2320 	return;
2321 
2322 drop:
2323 	m_freem(m);
2324 }
2325 
2326 /*
2327  * bridge_input:
2328  *
2329  *	Receive input from a member interface.  Queue the packet for
2330  *	bridging if it is not for us.
2331  */
2332 static struct mbuf *
2333 bridge_input(struct ifnet *ifp, struct mbuf *m)
2334 {
2335 	struct bridge_softc *sc = ifp->if_bridge;
2336 	struct bridge_iflist *bif, *bif2;
2337 	struct ifnet *bifp;
2338 	struct ether_header *eh;
2339 	struct mbuf *mc, *mc2;
2340 	uint16_t vlan;
2341 	int error;
2342 
2343 	NET_EPOCH_ASSERT();
2344 
2345 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2346 		return (m);
2347 
2348 	bifp = sc->sc_ifp;
2349 	vlan = VLANTAGOF(m);
2350 
2351 	/*
2352 	 * Implement support for bridge monitoring. If this flag has been
2353 	 * set on this interface, discard the packet once we push it through
2354 	 * the bpf(4) machinery, but before we do, increment the byte and
2355 	 * packet counters associated with this interface.
2356 	 */
2357 	if ((bifp->if_flags & IFF_MONITOR) != 0) {
2358 		m->m_pkthdr.rcvif  = bifp;
2359 		ETHER_BPF_MTAP(bifp, m);
2360 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2361 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2362 		m_freem(m);
2363 		return (NULL);
2364 	}
2365 	bif = bridge_lookup_member_if(sc, ifp);
2366 	if (bif == NULL) {
2367 		return (m);
2368 	}
2369 
2370 	eh = mtod(m, struct ether_header *);
2371 
2372 	bridge_span(sc, m);
2373 
2374 	if (m->m_flags & (M_BCAST|M_MCAST)) {
2375 		/* Tap off 802.1D packets; they do not get forwarded. */
2376 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2377 		    ETHER_ADDR_LEN) == 0) {
2378 			bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2379 			return (NULL);
2380 		}
2381 
2382 		if ((bif->bif_flags & IFBIF_STP) &&
2383 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2384 			return (m);
2385 		}
2386 
2387 		/*
2388 		 * Make a deep copy of the packet and enqueue the copy
2389 		 * for bridge processing; return the original packet for
2390 		 * local processing.
2391 		 */
2392 		mc = m_dup(m, M_NOWAIT);
2393 		if (mc == NULL) {
2394 			return (m);
2395 		}
2396 
2397 		/* Perform the bridge forwarding function with the copy. */
2398 		bridge_forward(sc, bif, mc);
2399 
2400 		/*
2401 		 * Reinject the mbuf as arriving on the bridge so we have a
2402 		 * chance at claiming multicast packets. We can not loop back
2403 		 * here from ether_input as a bridge is never a member of a
2404 		 * bridge.
2405 		 */
2406 		KASSERT(bifp->if_bridge == NULL,
2407 		    ("loop created in bridge_input"));
2408 		mc2 = m_dup(m, M_NOWAIT);
2409 		if (mc2 != NULL) {
2410 			/* Keep the layer3 header aligned */
2411 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2412 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2413 		}
2414 		if (mc2 != NULL) {
2415 			mc2->m_pkthdr.rcvif = bifp;
2416 			(*bifp->if_input)(bifp, mc2);
2417 		}
2418 
2419 		/* Return the original packet for local processing. */
2420 		return (m);
2421 	}
2422 
2423 	if ((bif->bif_flags & IFBIF_STP) &&
2424 	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2425 		return (m);
2426 	}
2427 
2428 #if (defined(INET) || defined(INET6))
2429 #   define OR_CARP_CHECK_WE_ARE_DST(iface) \
2430 	|| ((iface)->if_carp \
2431 	    && (*carp_forus_p)((iface), eh->ether_dhost))
2432 #   define OR_CARP_CHECK_WE_ARE_SRC(iface) \
2433 	|| ((iface)->if_carp \
2434 	    && (*carp_forus_p)((iface), eh->ether_shost))
2435 #else
2436 #   define OR_CARP_CHECK_WE_ARE_DST(iface)
2437 #   define OR_CARP_CHECK_WE_ARE_SRC(iface)
2438 #endif
2439 
2440 #ifdef INET6
2441 #   define OR_PFIL_HOOKED_INET6 \
2442 	|| PFIL_HOOKED_IN(V_inet6_pfil_head)
2443 #else
2444 #   define OR_PFIL_HOOKED_INET6
2445 #endif
2446 
2447 #define GRAB_OUR_PACKETS(iface) \
2448 	if ((iface)->if_type == IFT_GIF) \
2449 		continue; \
2450 	/* It is destined for us. */ \
2451 	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost,  ETHER_ADDR_LEN) == 0 \
2452 	    OR_CARP_CHECK_WE_ARE_DST((iface))				\
2453 	    ) {								\
2454 		if (bif->bif_flags & IFBIF_LEARNING) {			\
2455 			error = bridge_rtupdate(sc, eh->ether_shost,	\
2456 			    vlan, bif, 0, IFBAF_DYNAMIC);		\
2457 			if (error && bif->bif_addrmax) {		\
2458 				m_freem(m);				\
2459 				return (NULL);				\
2460 			}						\
2461 		}							\
2462 		m->m_pkthdr.rcvif = iface;				\
2463 		if ((iface) == ifp) {					\
2464 			/* Skip bridge processing... src == dest */	\
2465 			return (m);					\
2466 		}							\
2467 		/* It's passing over or to the bridge, locally. */	\
2468 		ETHER_BPF_MTAP(bifp, m);				\
2469 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);		\
2470 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); \
2471 		/* Filter on the physical interface. */			\
2472 		if (V_pfil_local_phys && (PFIL_HOOKED_IN(V_inet_pfil_head) \
2473 		     OR_PFIL_HOOKED_INET6)) {				\
2474 			if (bridge_pfil(&m, NULL, ifp,			\
2475 			    PFIL_IN) != 0 || m == NULL) {		\
2476 				return (NULL);				\
2477 			}						\
2478 		}							\
2479 		if ((iface) != bifp)					\
2480 			ETHER_BPF_MTAP(iface, m);			\
2481 		return (m);						\
2482 	}								\
2483 									\
2484 	/* We just received a packet that we sent out. */		\
2485 	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
2486 	    OR_CARP_CHECK_WE_ARE_SRC((iface))			\
2487 	    ) {								\
2488 		m_freem(m);						\
2489 		return (NULL);						\
2490 	}
2491 
2492 	/*
2493 	 * Unicast.  Make sure it's not for the bridge.
2494 	 */
2495 	do { GRAB_OUR_PACKETS(bifp) } while (0);
2496 
2497 	/*
2498 	 * Give a chance for ifp at first priority. This will help when	the
2499 	 * packet comes through the interface like VLAN's with the same MACs
2500 	 * on several interfaces from the same bridge. This also will save
2501 	 * some CPU cycles in case the destination interface and the input
2502 	 * interface (eq ifp) are the same.
2503 	 */
2504 	do { GRAB_OUR_PACKETS(ifp) } while (0);
2505 
2506 	/* Now check the all bridge members. */
2507 	CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2508 		GRAB_OUR_PACKETS(bif2->bif_ifp)
2509 	}
2510 
2511 #undef OR_CARP_CHECK_WE_ARE_DST
2512 #undef OR_CARP_CHECK_WE_ARE_SRC
2513 #undef OR_PFIL_HOOKED_INET6
2514 #undef GRAB_OUR_PACKETS
2515 
2516 	/* Perform the bridge forwarding function. */
2517 	bridge_forward(sc, bif, m);
2518 
2519 	return (NULL);
2520 }
2521 
2522 /*
2523  * bridge_broadcast:
2524  *
2525  *	Send a frame to all interfaces that are members of
2526  *	the bridge, except for the one on which the packet
2527  *	arrived.
2528  *
2529  *	NOTE: Releases the lock on return.
2530  */
2531 static void
2532 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2533     struct mbuf *m, int runfilt)
2534 {
2535 	struct bridge_iflist *dbif, *sbif;
2536 	struct mbuf *mc;
2537 	struct ifnet *dst_if;
2538 	int used = 0, i;
2539 
2540 	NET_EPOCH_ASSERT();
2541 
2542 	sbif = bridge_lookup_member_if(sc, src_if);
2543 
2544 	/* Filter on the bridge interface before broadcasting */
2545 	if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2546 #ifdef INET6
2547 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2548 #endif
2549 	    )) {
2550 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2551 			return;
2552 		if (m == NULL)
2553 			return;
2554 	}
2555 
2556 	CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2557 		dst_if = dbif->bif_ifp;
2558 		if (dst_if == src_if)
2559 			continue;
2560 
2561 		/* Private segments can not talk to each other */
2562 		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2563 			continue;
2564 
2565 		if ((dbif->bif_flags & IFBIF_STP) &&
2566 		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2567 			continue;
2568 
2569 		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2570 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2571 			continue;
2572 
2573 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2574 			continue;
2575 
2576 		if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
2577 			mc = m;
2578 			used = 1;
2579 		} else {
2580 			mc = m_dup(m, M_NOWAIT);
2581 			if (mc == NULL) {
2582 				if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2583 				continue;
2584 			}
2585 		}
2586 
2587 		/*
2588 		 * Filter on the output interface. Pass a NULL bridge interface
2589 		 * pointer so we do not redundantly filter on the bridge for
2590 		 * each interface we broadcast on.
2591 		 */
2592 		if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2593 #ifdef INET6
2594 		    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2595 #endif
2596 		    )) {
2597 			if (used == 0) {
2598 				/* Keep the layer3 header aligned */
2599 				i = min(mc->m_pkthdr.len, max_protohdr);
2600 				mc = m_copyup(mc, i, ETHER_ALIGN);
2601 				if (mc == NULL) {
2602 					if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2603 					continue;
2604 				}
2605 			}
2606 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2607 				continue;
2608 			if (mc == NULL)
2609 				continue;
2610 		}
2611 
2612 		bridge_enqueue(sc, dst_if, mc);
2613 	}
2614 	if (used == 0)
2615 		m_freem(m);
2616 }
2617 
2618 /*
2619  * bridge_span:
2620  *
2621  *	Duplicate a packet out one or more interfaces that are in span mode,
2622  *	the original mbuf is unmodified.
2623  */
2624 static void
2625 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2626 {
2627 	struct bridge_iflist *bif;
2628 	struct ifnet *dst_if;
2629 	struct mbuf *mc;
2630 
2631 	NET_EPOCH_ASSERT();
2632 
2633 	if (CK_LIST_EMPTY(&sc->sc_spanlist))
2634 		return;
2635 
2636 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2637 		dst_if = bif->bif_ifp;
2638 
2639 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2640 			continue;
2641 
2642 		mc = m_copypacket(m, M_NOWAIT);
2643 		if (mc == NULL) {
2644 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2645 			continue;
2646 		}
2647 
2648 		bridge_enqueue(sc, dst_if, mc);
2649 	}
2650 }
2651 
2652 /*
2653  * bridge_rtupdate:
2654  *
2655  *	Add a bridge routing entry.
2656  */
2657 static int
2658 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
2659     struct bridge_iflist *bif, int setflags, uint8_t flags)
2660 {
2661 	struct bridge_rtnode *brt;
2662 	int error;
2663 
2664 	NET_EPOCH_ASSERT();
2665 	BRIDGE_UNLOCK_ASSERT(sc);
2666 
2667 	/* Check the source address is valid and not multicast. */
2668 	if (ETHER_IS_MULTICAST(dst) ||
2669 	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
2670 	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
2671 		return (EINVAL);
2672 
2673 	/* 802.1p frames map to vlan 1 */
2674 	if (vlan == 0)
2675 		vlan = 1;
2676 
2677 	/*
2678 	 * A route for this destination might already exist.  If so,
2679 	 * update it, otherwise create a new one.
2680 	 */
2681 	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2682 		BRIDGE_LOCK(sc);
2683 
2684 		/* Check again, now that we have the lock. There could have
2685 		 * been a race and we only want to insert this once. */
2686 		if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) != NULL) {
2687 			BRIDGE_UNLOCK(sc);
2688 			return (0);
2689 		}
2690 
2691 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2692 			sc->sc_brtexceeded++;
2693 			BRIDGE_UNLOCK(sc);
2694 			return (ENOSPC);
2695 		}
2696 		/* Check per interface address limits (if enabled) */
2697 		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2698 			bif->bif_addrexceeded++;
2699 			BRIDGE_UNLOCK(sc);
2700 			return (ENOSPC);
2701 		}
2702 
2703 		/*
2704 		 * Allocate a new bridge forwarding node, and
2705 		 * initialize the expiration time and Ethernet
2706 		 * address.
2707 		 */
2708 		brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2709 		if (brt == NULL) {
2710 			BRIDGE_UNLOCK(sc);
2711 			return (ENOMEM);
2712 		}
2713 		brt->brt_vnet = curvnet;
2714 
2715 		if (bif->bif_flags & IFBIF_STICKY)
2716 			brt->brt_flags = IFBAF_STICKY;
2717 		else
2718 			brt->brt_flags = IFBAF_DYNAMIC;
2719 
2720 		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2721 		brt->brt_vlan = vlan;
2722 
2723 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2724 			uma_zfree(V_bridge_rtnode_zone, brt);
2725 			BRIDGE_UNLOCK(sc);
2726 			return (error);
2727 		}
2728 		brt->brt_dst = bif;
2729 		bif->bif_addrcnt++;
2730 
2731 		BRIDGE_UNLOCK(sc);
2732 	}
2733 
2734 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2735 	    brt->brt_dst != bif) {
2736 		BRIDGE_LOCK(sc);
2737 		brt->brt_dst->bif_addrcnt--;
2738 		brt->brt_dst = bif;
2739 		brt->brt_dst->bif_addrcnt++;
2740 		BRIDGE_UNLOCK(sc);
2741 	}
2742 
2743 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2744 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
2745 	if (setflags)
2746 		brt->brt_flags = flags;
2747 
2748 	return (0);
2749 }
2750 
2751 /*
2752  * bridge_rtlookup:
2753  *
2754  *	Lookup the destination interface for an address.
2755  */
2756 static struct ifnet *
2757 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2758 {
2759 	struct bridge_rtnode *brt;
2760 
2761 	NET_EPOCH_ASSERT();
2762 
2763 	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
2764 		return (NULL);
2765 
2766 	return (brt->brt_ifp);
2767 }
2768 
2769 /*
2770  * bridge_rttrim:
2771  *
2772  *	Trim the routine table so that we have a number
2773  *	of routing entries less than or equal to the
2774  *	maximum number.
2775  */
2776 static void
2777 bridge_rttrim(struct bridge_softc *sc)
2778 {
2779 	struct bridge_rtnode *brt, *nbrt;
2780 
2781 	NET_EPOCH_ASSERT();
2782 	BRIDGE_LOCK_ASSERT(sc);
2783 
2784 	/* Make sure we actually need to do this. */
2785 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2786 		return;
2787 
2788 	/* Force an aging cycle; this might trim enough addresses. */
2789 	bridge_rtage(sc);
2790 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2791 		return;
2792 
2793 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2794 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2795 			bridge_rtnode_destroy(sc, brt);
2796 			if (sc->sc_brtcnt <= sc->sc_brtmax)
2797 				return;
2798 		}
2799 	}
2800 }
2801 
2802 /*
2803  * bridge_timer:
2804  *
2805  *	Aging timer for the bridge.
2806  */
2807 static void
2808 bridge_timer(void *arg)
2809 {
2810 	struct bridge_softc *sc = arg;
2811 	struct epoch_tracker et;
2812 
2813 	NET_EPOCH_ENTER(et);
2814 	BRIDGE_LOCK_ASSERT(sc);
2815 
2816 	/* Destruction of rtnodes requires a proper vnet context */
2817 	CURVNET_SET(sc->sc_ifp->if_vnet);
2818 	bridge_rtage(sc);
2819 
2820 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
2821 		callout_reset(&sc->sc_brcallout,
2822 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2823 	CURVNET_RESTORE();
2824 	NET_EPOCH_EXIT(et);
2825 }
2826 
2827 /*
2828  * bridge_rtage:
2829  *
2830  *	Perform an aging cycle.
2831  */
2832 static void
2833 bridge_rtage(struct bridge_softc *sc)
2834 {
2835 	struct bridge_rtnode *brt, *nbrt;
2836 
2837 	NET_EPOCH_ASSERT();
2838 	BRIDGE_LOCK_ASSERT(sc);
2839 
2840 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2841 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2842 			if (time_uptime >= brt->brt_expire)
2843 				bridge_rtnode_destroy(sc, brt);
2844 		}
2845 	}
2846 }
2847 
2848 /*
2849  * bridge_rtflush:
2850  *
2851  *	Remove all dynamic addresses from the bridge.
2852  */
2853 static void
2854 bridge_rtflush(struct bridge_softc *sc, int full)
2855 {
2856 	struct bridge_rtnode *brt, *nbrt;
2857 
2858 	NET_EPOCH_ASSERT();
2859 	BRIDGE_LOCK_ASSERT(sc);
2860 
2861 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2862 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2863 			bridge_rtnode_destroy(sc, brt);
2864 	}
2865 }
2866 
2867 /*
2868  * bridge_rtdaddr:
2869  *
2870  *	Remove an address from the table.
2871  */
2872 static int
2873 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2874 {
2875 	struct bridge_rtnode *brt;
2876 	int found = 0;
2877 
2878 	NET_EPOCH_ASSERT();
2879 	BRIDGE_LOCK_ASSERT(sc);
2880 
2881 	/*
2882 	 * If vlan is zero then we want to delete for all vlans so the lookup
2883 	 * may return more than one.
2884 	 */
2885 	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
2886 		bridge_rtnode_destroy(sc, brt);
2887 		found = 1;
2888 	}
2889 
2890 	return (found ? 0 : ENOENT);
2891 }
2892 
2893 /*
2894  * bridge_rtdelete:
2895  *
2896  *	Delete routes to a speicifc member interface.
2897  */
2898 static void
2899 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2900 {
2901 	struct bridge_rtnode *brt, *nbrt;
2902 
2903 	NET_EPOCH_ASSERT();
2904 	BRIDGE_LOCK_ASSERT(sc);
2905 
2906 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2907 		if (brt->brt_ifp == ifp && (full ||
2908 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2909 			bridge_rtnode_destroy(sc, brt);
2910 	}
2911 }
2912 
2913 /*
2914  * bridge_rtable_init:
2915  *
2916  *	Initialize the route table for this bridge.
2917  */
2918 static void
2919 bridge_rtable_init(struct bridge_softc *sc)
2920 {
2921 	int i;
2922 
2923 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2924 	    M_DEVBUF, M_WAITOK);
2925 
2926 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2927 		CK_LIST_INIT(&sc->sc_rthash[i]);
2928 
2929 	sc->sc_rthash_key = arc4random();
2930 	CK_LIST_INIT(&sc->sc_rtlist);
2931 }
2932 
2933 /*
2934  * bridge_rtable_fini:
2935  *
2936  *	Deconstruct the route table for this bridge.
2937  */
2938 static void
2939 bridge_rtable_fini(struct bridge_softc *sc)
2940 {
2941 
2942 	KASSERT(sc->sc_brtcnt == 0,
2943 	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
2944 	free(sc->sc_rthash, M_DEVBUF);
2945 }
2946 
2947 /*
2948  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2949  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2950  */
2951 #define	mix(a, b, c)							\
2952 do {									\
2953 	a -= b; a -= c; a ^= (c >> 13);					\
2954 	b -= c; b -= a; b ^= (a << 8);					\
2955 	c -= a; c -= b; c ^= (b >> 13);					\
2956 	a -= b; a -= c; a ^= (c >> 12);					\
2957 	b -= c; b -= a; b ^= (a << 16);					\
2958 	c -= a; c -= b; c ^= (b >> 5);					\
2959 	a -= b; a -= c; a ^= (c >> 3);					\
2960 	b -= c; b -= a; b ^= (a << 10);					\
2961 	c -= a; c -= b; c ^= (b >> 15);					\
2962 } while (/*CONSTCOND*/0)
2963 
2964 static __inline uint32_t
2965 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2966 {
2967 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2968 
2969 	b += addr[5] << 8;
2970 	b += addr[4];
2971 	a += addr[3] << 24;
2972 	a += addr[2] << 16;
2973 	a += addr[1] << 8;
2974 	a += addr[0];
2975 
2976 	mix(a, b, c);
2977 
2978 	return (c & BRIDGE_RTHASH_MASK);
2979 }
2980 
2981 #undef mix
2982 
2983 static int
2984 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
2985 {
2986 	int i, d;
2987 
2988 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
2989 		d = ((int)a[i]) - ((int)b[i]);
2990 	}
2991 
2992 	return (d);
2993 }
2994 
2995 /*
2996  * bridge_rtnode_lookup:
2997  *
2998  *	Look up a bridge route node for the specified destination. Compare the
2999  *	vlan id or if zero then just return the first match.
3000  */
3001 static struct bridge_rtnode *
3002 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
3003 {
3004 	struct bridge_rtnode *brt;
3005 	uint32_t hash;
3006 	int dir;
3007 
3008 	NET_EPOCH_ASSERT();
3009 
3010 	hash = bridge_rthash(sc, addr);
3011 	CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
3012 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3013 		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
3014 			return (brt);
3015 		if (dir > 0)
3016 			return (NULL);
3017 	}
3018 
3019 	return (NULL);
3020 }
3021 
3022 /*
3023  * bridge_rtnode_insert:
3024  *
3025  *	Insert the specified bridge node into the route table.  We
3026  *	assume the entry is not already in the table.
3027  */
3028 static int
3029 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3030 {
3031 	struct bridge_rtnode *lbrt;
3032 	uint32_t hash;
3033 	int dir;
3034 
3035 	BRIDGE_LOCK_ASSERT(sc);
3036 
3037 	hash = bridge_rthash(sc, brt->brt_addr);
3038 
3039 	lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
3040 	if (lbrt == NULL) {
3041 		CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
3042 		goto out;
3043 	}
3044 
3045 	do {
3046 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3047 		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
3048 			return (EEXIST);
3049 		if (dir > 0) {
3050 			CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3051 			goto out;
3052 		}
3053 		if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
3054 			CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3055 			goto out;
3056 		}
3057 		lbrt = CK_LIST_NEXT(lbrt, brt_hash);
3058 	} while (lbrt != NULL);
3059 
3060 #ifdef DIAGNOSTIC
3061 	panic("bridge_rtnode_insert: impossible");
3062 #endif
3063 
3064 out:
3065 	CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3066 	sc->sc_brtcnt++;
3067 
3068 	return (0);
3069 }
3070 
3071 static void
3072 bridge_rtnode_destroy_cb(struct epoch_context *ctx)
3073 {
3074 	struct bridge_rtnode *brt;
3075 
3076 	brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
3077 
3078 	CURVNET_SET(brt->brt_vnet);
3079 	uma_zfree(V_bridge_rtnode_zone, brt);
3080 	CURVNET_RESTORE();
3081 }
3082 
3083 /*
3084  * bridge_rtnode_destroy:
3085  *
3086  *	Destroy a bridge rtnode.
3087  */
3088 static void
3089 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3090 {
3091 	NET_EPOCH_ASSERT();
3092 	BRIDGE_LOCK_ASSERT(sc);
3093 
3094 	CK_LIST_REMOVE(brt, brt_hash);
3095 
3096 	CK_LIST_REMOVE(brt, brt_list);
3097 	sc->sc_brtcnt--;
3098 	brt->brt_dst->bif_addrcnt--;
3099 
3100 	NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
3101 }
3102 
3103 /*
3104  * bridge_rtable_expire:
3105  *
3106  *	Set the expiry time for all routes on an interface.
3107  */
3108 static void
3109 bridge_rtable_expire(struct ifnet *ifp, int age)
3110 {
3111 	struct bridge_softc *sc = ifp->if_bridge;
3112 	struct bridge_rtnode *brt;
3113 	struct epoch_tracker et;
3114 
3115 	NET_EPOCH_ENTER(et);
3116 	CURVNET_SET(ifp->if_vnet);
3117 	BRIDGE_LOCK(sc);
3118 
3119 	/*
3120 	 * If the age is zero then flush, otherwise set all the expiry times to
3121 	 * age for the interface
3122 	 */
3123 	if (age == 0)
3124 		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3125 	else {
3126 		CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3127 			/* Cap the expiry time to 'age' */
3128 			if (brt->brt_ifp == ifp &&
3129 			    brt->brt_expire > time_uptime + age &&
3130 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3131 				brt->brt_expire = time_uptime + age;
3132 		}
3133 	}
3134 	BRIDGE_UNLOCK(sc);
3135 	CURVNET_RESTORE();
3136 	NET_EPOCH_EXIT(et);
3137 }
3138 
3139 /*
3140  * bridge_state_change:
3141  *
3142  *	Callback from the bridgestp code when a port changes states.
3143  */
3144 static void
3145 bridge_state_change(struct ifnet *ifp, int state)
3146 {
3147 	struct bridge_softc *sc = ifp->if_bridge;
3148 	static const char *stpstates[] = {
3149 		"disabled",
3150 		"listening",
3151 		"learning",
3152 		"forwarding",
3153 		"blocking",
3154 		"discarding"
3155 	};
3156 
3157 	CURVNET_SET(ifp->if_vnet);
3158 	if (V_log_stp)
3159 		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
3160 		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
3161 	CURVNET_RESTORE();
3162 }
3163 
3164 /*
3165  * Send bridge packets through pfil if they are one of the types pfil can deal
3166  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3167  * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
3168  * that interface.
3169  */
3170 static int
3171 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3172 {
3173 	int snap, error, i, hlen;
3174 	struct ether_header *eh1, eh2;
3175 	struct ip *ip;
3176 	struct llc llc1;
3177 	u_int16_t ether_type;
3178 	pfil_return_t rv;
3179 
3180 	snap = 0;
3181 	error = -1;	/* Default error if not error == 0 */
3182 
3183 #if 0
3184 	/* we may return with the IP fields swapped, ensure its not shared */
3185 	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
3186 #endif
3187 
3188 	if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
3189 		return (0); /* filtering is disabled */
3190 
3191 	i = min((*mp)->m_pkthdr.len, max_protohdr);
3192 	if ((*mp)->m_len < i) {
3193 	    *mp = m_pullup(*mp, i);
3194 	    if (*mp == NULL) {
3195 		printf("%s: m_pullup failed\n", __func__);
3196 		return (-1);
3197 	    }
3198 	}
3199 
3200 	eh1 = mtod(*mp, struct ether_header *);
3201 	ether_type = ntohs(eh1->ether_type);
3202 
3203 	/*
3204 	 * Check for SNAP/LLC.
3205 	 */
3206 	if (ether_type < ETHERMTU) {
3207 		struct llc *llc2 = (struct llc *)(eh1 + 1);
3208 
3209 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3210 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
3211 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
3212 		    llc2->llc_control == LLC_UI) {
3213 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
3214 			snap = 1;
3215 		}
3216 	}
3217 
3218 	/*
3219 	 * If we're trying to filter bridge traffic, don't look at anything
3220 	 * other than IP and ARP traffic.  If the filter doesn't understand
3221 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
3222 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3223 	 * but of course we don't have an AppleTalk filter to begin with.
3224 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3225 	 * ARP traffic.)
3226 	 */
3227 	switch (ether_type) {
3228 		case ETHERTYPE_ARP:
3229 		case ETHERTYPE_REVARP:
3230 			if (V_pfil_ipfw_arp == 0)
3231 				return (0); /* Automatically pass */
3232 			break;
3233 
3234 		case ETHERTYPE_IP:
3235 #ifdef INET6
3236 		case ETHERTYPE_IPV6:
3237 #endif /* INET6 */
3238 			break;
3239 		default:
3240 			/*
3241 			 * Check to see if the user wants to pass non-ip
3242 			 * packets, these will not be checked by pfil(9) and
3243 			 * passed unconditionally so the default is to drop.
3244 			 */
3245 			if (V_pfil_onlyip)
3246 				goto bad;
3247 	}
3248 
3249 	/* Run the packet through pfil before stripping link headers */
3250 	if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
3251 	    dir == PFIL_OUT && ifp != NULL) {
3252 		switch (pfil_run_hooks(V_link_pfil_head, mp, ifp, dir, NULL)) {
3253 		case PFIL_DROPPED:
3254 			return (EACCES);
3255 		case PFIL_CONSUMED:
3256 			return (0);
3257 		}
3258 	}
3259 
3260 	/* Strip off the Ethernet header and keep a copy. */
3261 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3262 	m_adj(*mp, ETHER_HDR_LEN);
3263 
3264 	/* Strip off snap header, if present */
3265 	if (snap) {
3266 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3267 		m_adj(*mp, sizeof(struct llc));
3268 	}
3269 
3270 	/*
3271 	 * Check the IP header for alignment and errors
3272 	 */
3273 	if (dir == PFIL_IN) {
3274 		switch (ether_type) {
3275 			case ETHERTYPE_IP:
3276 				error = bridge_ip_checkbasic(mp);
3277 				break;
3278 #ifdef INET6
3279 			case ETHERTYPE_IPV6:
3280 				error = bridge_ip6_checkbasic(mp);
3281 				break;
3282 #endif /* INET6 */
3283 			default:
3284 				error = 0;
3285 		}
3286 		if (error)
3287 			goto bad;
3288 	}
3289 
3290 	error = 0;
3291 
3292 	/*
3293 	 * Run the packet through pfil
3294 	 */
3295 	rv = PFIL_PASS;
3296 	switch (ether_type) {
3297 	case ETHERTYPE_IP:
3298 		/*
3299 		 * Run pfil on the member interface and the bridge, both can
3300 		 * be skipped by clearing pfil_member or pfil_bridge.
3301 		 *
3302 		 * Keep the order:
3303 		 *   in_if -> bridge_if -> out_if
3304 		 */
3305 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3306 		    pfil_run_hooks(V_inet_pfil_head, mp, bifp, dir, NULL)) !=
3307 		    PFIL_PASS)
3308 			break;
3309 
3310 		if (V_pfil_member && ifp != NULL && (rv =
3311 		    pfil_run_hooks(V_inet_pfil_head, mp, ifp, dir, NULL)) !=
3312 		    PFIL_PASS)
3313 			break;
3314 
3315 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3316 		    pfil_run_hooks(V_inet_pfil_head, mp, bifp, dir, NULL)) !=
3317 		    PFIL_PASS)
3318 			break;
3319 
3320 		/* check if we need to fragment the packet */
3321 		/* bridge_fragment generates a mbuf chain of packets */
3322 		/* that already include eth headers */
3323 		if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
3324 			i = (*mp)->m_pkthdr.len;
3325 			if (i > ifp->if_mtu) {
3326 				error = bridge_fragment(ifp, mp, &eh2, snap,
3327 					    &llc1);
3328 				return (error);
3329 			}
3330 		}
3331 
3332 		/* Recalculate the ip checksum. */
3333 		ip = mtod(*mp, struct ip *);
3334 		hlen = ip->ip_hl << 2;
3335 		if (hlen < sizeof(struct ip))
3336 			goto bad;
3337 		if (hlen > (*mp)->m_len) {
3338 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3339 				goto bad;
3340 			ip = mtod(*mp, struct ip *);
3341 			if (ip == NULL)
3342 				goto bad;
3343 		}
3344 		ip->ip_sum = 0;
3345 		if (hlen == sizeof(struct ip))
3346 			ip->ip_sum = in_cksum_hdr(ip);
3347 		else
3348 			ip->ip_sum = in_cksum(*mp, hlen);
3349 
3350 		break;
3351 #ifdef INET6
3352 	case ETHERTYPE_IPV6:
3353 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3354 		    pfil_run_hooks(V_inet6_pfil_head, mp, bifp, dir, NULL)) !=
3355 		    PFIL_PASS)
3356 			break;
3357 
3358 		if (V_pfil_member && ifp != NULL && (rv =
3359 		    pfil_run_hooks(V_inet6_pfil_head, mp, ifp, dir, NULL)) !=
3360 		    PFIL_PASS)
3361 			break;
3362 
3363 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3364 		    pfil_run_hooks(V_inet6_pfil_head, mp, bifp, dir, NULL)) !=
3365 		    PFIL_PASS)
3366 			break;
3367 		break;
3368 #endif
3369 	}
3370 
3371 	switch (rv) {
3372 	case PFIL_CONSUMED:
3373 		return (0);
3374 	case PFIL_DROPPED:
3375 		return (EACCES);
3376 	default:
3377 		break;
3378 	}
3379 
3380 	error = -1;
3381 
3382 	/*
3383 	 * Finally, put everything back the way it was and return
3384 	 */
3385 	if (snap) {
3386 		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
3387 		if (*mp == NULL)
3388 			return (error);
3389 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3390 	}
3391 
3392 	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
3393 	if (*mp == NULL)
3394 		return (error);
3395 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3396 
3397 	return (0);
3398 
3399 bad:
3400 	m_freem(*mp);
3401 	*mp = NULL;
3402 	return (error);
3403 }
3404 
3405 /*
3406  * Perform basic checks on header size since
3407  * pfil assumes ip_input has already processed
3408  * it for it.  Cut-and-pasted from ip_input.c.
3409  * Given how simple the IPv6 version is,
3410  * does the IPv4 version really need to be
3411  * this complicated?
3412  *
3413  * XXX Should we update ipstat here, or not?
3414  * XXX Right now we update ipstat but not
3415  * XXX csum_counter.
3416  */
3417 static int
3418 bridge_ip_checkbasic(struct mbuf **mp)
3419 {
3420 	struct mbuf *m = *mp;
3421 	struct ip *ip;
3422 	int len, hlen;
3423 	u_short sum;
3424 
3425 	if (*mp == NULL)
3426 		return (-1);
3427 
3428 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3429 		if ((m = m_copyup(m, sizeof(struct ip),
3430 			(max_linkhdr + 3) & ~3)) == NULL) {
3431 			/* XXXJRT new stat, please */
3432 			KMOD_IPSTAT_INC(ips_toosmall);
3433 			goto bad;
3434 		}
3435 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3436 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3437 			KMOD_IPSTAT_INC(ips_toosmall);
3438 			goto bad;
3439 		}
3440 	}
3441 	ip = mtod(m, struct ip *);
3442 	if (ip == NULL) goto bad;
3443 
3444 	if (ip->ip_v != IPVERSION) {
3445 		KMOD_IPSTAT_INC(ips_badvers);
3446 		goto bad;
3447 	}
3448 	hlen = ip->ip_hl << 2;
3449 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3450 		KMOD_IPSTAT_INC(ips_badhlen);
3451 		goto bad;
3452 	}
3453 	if (hlen > m->m_len) {
3454 		if ((m = m_pullup(m, hlen)) == NULL) {
3455 			KMOD_IPSTAT_INC(ips_badhlen);
3456 			goto bad;
3457 		}
3458 		ip = mtod(m, struct ip *);
3459 		if (ip == NULL) goto bad;
3460 	}
3461 
3462 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3463 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3464 	} else {
3465 		if (hlen == sizeof(struct ip)) {
3466 			sum = in_cksum_hdr(ip);
3467 		} else {
3468 			sum = in_cksum(m, hlen);
3469 		}
3470 	}
3471 	if (sum) {
3472 		KMOD_IPSTAT_INC(ips_badsum);
3473 		goto bad;
3474 	}
3475 
3476 	/* Retrieve the packet length. */
3477 	len = ntohs(ip->ip_len);
3478 
3479 	/*
3480 	 * Check for additional length bogosity
3481 	 */
3482 	if (len < hlen) {
3483 		KMOD_IPSTAT_INC(ips_badlen);
3484 		goto bad;
3485 	}
3486 
3487 	/*
3488 	 * Check that the amount of data in the buffers
3489 	 * is as at least much as the IP header would have us expect.
3490 	 * Drop packet if shorter than we expect.
3491 	 */
3492 	if (m->m_pkthdr.len < len) {
3493 		KMOD_IPSTAT_INC(ips_tooshort);
3494 		goto bad;
3495 	}
3496 
3497 	/* Checks out, proceed */
3498 	*mp = m;
3499 	return (0);
3500 
3501 bad:
3502 	*mp = m;
3503 	return (-1);
3504 }
3505 
3506 #ifdef INET6
3507 /*
3508  * Same as above, but for IPv6.
3509  * Cut-and-pasted from ip6_input.c.
3510  * XXX Should we update ip6stat, or not?
3511  */
3512 static int
3513 bridge_ip6_checkbasic(struct mbuf **mp)
3514 {
3515 	struct mbuf *m = *mp;
3516 	struct ip6_hdr *ip6;
3517 
3518 	/*
3519 	 * If the IPv6 header is not aligned, slurp it up into a new
3520 	 * mbuf with space for link headers, in the event we forward
3521 	 * it.  Otherwise, if it is aligned, make sure the entire base
3522 	 * IPv6 header is in the first mbuf of the chain.
3523 	 */
3524 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3525 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3526 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3527 			    (max_linkhdr + 3) & ~3)) == NULL) {
3528 			/* XXXJRT new stat, please */
3529 			IP6STAT_INC(ip6s_toosmall);
3530 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3531 			goto bad;
3532 		}
3533 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3534 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3535 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3536 			IP6STAT_INC(ip6s_toosmall);
3537 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3538 			goto bad;
3539 		}
3540 	}
3541 
3542 	ip6 = mtod(m, struct ip6_hdr *);
3543 
3544 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3545 		IP6STAT_INC(ip6s_badvers);
3546 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3547 		goto bad;
3548 	}
3549 
3550 	/* Checks out, proceed */
3551 	*mp = m;
3552 	return (0);
3553 
3554 bad:
3555 	*mp = m;
3556 	return (-1);
3557 }
3558 #endif /* INET6 */
3559 
3560 /*
3561  * bridge_fragment:
3562  *
3563  *	Fragment mbuf chain in multiple packets and prepend ethernet header.
3564  */
3565 static int
3566 bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
3567     int snap, struct llc *llc)
3568 {
3569 	struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
3570 	struct ip *ip;
3571 	int error = -1;
3572 
3573 	if (m->m_len < sizeof(struct ip) &&
3574 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3575 		goto dropit;
3576 	ip = mtod(m, struct ip *);
3577 
3578 	m->m_pkthdr.csum_flags |= CSUM_IP;
3579 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
3580 	if (error)
3581 		goto dropit;
3582 
3583 	/*
3584 	 * Walk the chain and re-add the Ethernet header for
3585 	 * each mbuf packet.
3586 	 */
3587 	for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
3588 		nextpkt = mcur->m_nextpkt;
3589 		mcur->m_nextpkt = NULL;
3590 		if (snap) {
3591 			M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
3592 			if (mcur == NULL) {
3593 				error = ENOBUFS;
3594 				if (mprev != NULL)
3595 					mprev->m_nextpkt = nextpkt;
3596 				goto dropit;
3597 			}
3598 			bcopy(llc, mtod(mcur, caddr_t),sizeof(struct llc));
3599 		}
3600 
3601 		M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
3602 		if (mcur == NULL) {
3603 			error = ENOBUFS;
3604 			if (mprev != NULL)
3605 				mprev->m_nextpkt = nextpkt;
3606 			goto dropit;
3607 		}
3608 		bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);
3609 
3610 		/*
3611 		 * The previous two M_PREPEND could have inserted one or two
3612 		 * mbufs in front so we have to update the previous packet's
3613 		 * m_nextpkt.
3614 		 */
3615 		mcur->m_nextpkt = nextpkt;
3616 		if (mprev != NULL)
3617 			mprev->m_nextpkt = mcur;
3618 		else {
3619 			/* The first mbuf in the original chain needs to be
3620 			 * updated. */
3621 			*mp = mcur;
3622 		}
3623 		mprev = mcur;
3624 	}
3625 
3626 	KMOD_IPSTAT_INC(ips_fragmented);
3627 	return (error);
3628 
3629 dropit:
3630 	for (mcur = *mp; mcur; mcur = m) { /* droping the full packet chain */
3631 		m = mcur->m_nextpkt;
3632 		m_freem(mcur);
3633 	}
3634 	return (error);
3635 }
3636 
3637 static void
3638 bridge_linkstate(struct ifnet *ifp)
3639 {
3640 	struct bridge_softc *sc = ifp->if_bridge;
3641 	struct bridge_iflist *bif;
3642 	struct epoch_tracker et;
3643 
3644 	NET_EPOCH_ENTER(et);
3645 
3646 	bif = bridge_lookup_member_if(sc, ifp);
3647 	if (bif == NULL) {
3648 		NET_EPOCH_EXIT(et);
3649 		return;
3650 	}
3651 	bridge_linkcheck(sc);
3652 
3653 	bstp_linkstate(&bif->bif_stp);
3654 
3655 	NET_EPOCH_EXIT(et);
3656 }
3657 
3658 static void
3659 bridge_linkcheck(struct bridge_softc *sc)
3660 {
3661 	struct bridge_iflist *bif;
3662 	int new_link, hasls;
3663 
3664 	NET_EPOCH_ASSERT();
3665 
3666 	new_link = LINK_STATE_DOWN;
3667 	hasls = 0;
3668 	/* Our link is considered up if at least one of our ports is active */
3669 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
3670 		if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
3671 			hasls++;
3672 		if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
3673 			new_link = LINK_STATE_UP;
3674 			break;
3675 		}
3676 	}
3677 	if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
3678 		/* If no interfaces support link-state then we default to up */
3679 		new_link = LINK_STATE_UP;
3680 	}
3681 	if_link_state_change(sc->sc_ifp, new_link);
3682 }
3683