xref: /freebsd/sys/net/if_bridge.c (revision af23369a6deaaeb612ab266eb88b8bb8d560c322)
1 /*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * Copyright 2001 Wasabi Systems, Inc.
7  * All rights reserved.
8  *
9  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed for the NetBSD Project by
22  *	Wasabi Systems, Inc.
23  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
24  *    or promote products derived from this software without specific prior
25  *    written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
42  * All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
55  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
56  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
57  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
61  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
62  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63  * POSSIBILITY OF SUCH DAMAGE.
64  *
65  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
66  */
67 
68 /*
69  * Network interface bridge support.
70  *
71  * TODO:
72  *
73  *	- Currently only supports Ethernet-like interfaces (Ethernet,
74  *	  802.11, VLANs on Ethernet, etc.).  Figure out a nice way
75  *	  to bridge other types of interfaces (maybe consider
76  *	  heterogeneous bridges).
77  */
78 
79 #include <sys/cdefs.h>
80 __FBSDID("$FreeBSD$");
81 
82 #include "opt_inet.h"
83 #include "opt_inet6.h"
84 
85 #include <sys/param.h>
86 #include <sys/eventhandler.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/protosw.h>
90 #include <sys/systm.h>
91 #include <sys/jail.h>
92 #include <sys/time.h>
93 #include <sys/socket.h> /* for net/if.h */
94 #include <sys/sockio.h>
95 #include <sys/ctype.h>  /* string functions */
96 #include <sys/kernel.h>
97 #include <sys/random.h>
98 #include <sys/syslog.h>
99 #include <sys/sysctl.h>
100 #include <vm/uma.h>
101 #include <sys/module.h>
102 #include <sys/priv.h>
103 #include <sys/proc.h>
104 #include <sys/lock.h>
105 #include <sys/mutex.h>
106 
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_clone.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_var.h>
113 #include <net/if_private.h>
114 #include <net/pfil.h>
115 #include <net/vnet.h>
116 
117 #include <netinet/in.h>
118 #include <netinet/in_systm.h>
119 #include <netinet/in_var.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #ifdef INET6
123 #include <netinet/ip6.h>
124 #include <netinet6/ip6_var.h>
125 #include <netinet6/in6_ifattach.h>
126 #endif
127 #if defined(INET) || defined(INET6)
128 #include <netinet/ip_carp.h>
129 #endif
130 #include <machine/in_cksum.h>
131 #include <netinet/if_ether.h>
132 #include <net/bridgestp.h>
133 #include <net/if_bridgevar.h>
134 #include <net/if_llc.h>
135 #include <net/if_vlan_var.h>
136 
137 #include <net/route.h>
138 
139 #ifdef INET6
140 /*
141  * XXX: declared here to avoid including many inet6-related files;
142  * should this be more generalized?
143  */
144 extern void	nd6_setmtu(struct ifnet *);
145 #endif
146 
147 /*
148  * Size of the route hash table.  Must be a power of two.
149  */
150 #ifndef BRIDGE_RTHASH_SIZE
151 #define	BRIDGE_RTHASH_SIZE		1024
152 #endif
153 
154 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
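
/*
 * Illustrative note: because the size is a power of two, a hash value is
 * reduced to a table index with a cheap mask rather than a modulo; with
 * the default size of 1024 the mask is 0x3ff, i.e.
 *
 *	idx = hash & BRIDGE_RTHASH_MASK;
 *
 * is equivalent to idx = hash % BRIDGE_RTHASH_SIZE.
 */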
155 
156 /*
157  * Default maximum number of addresses to cache.
158  */
159 #ifndef BRIDGE_RTABLE_MAX
160 #define	BRIDGE_RTABLE_MAX		2000
161 #endif
162 
163 /*
164  * Timeout (in seconds) for entries learned dynamically.
165  */
166 #ifndef BRIDGE_RTABLE_TIMEOUT
167 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
168 #endif
169 
170 /*
171  * Number of seconds between walks of the route list.
172  */
173 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
174 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
175 #endif
176 
177 /*
178  * List of capabilities to possibly mask on the member interface.
179  */
180 #define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
181 					 IFCAP_TXCSUM_IPV6)
182 
183 /*
184  * List of capabilities to strip from the member interface.
185  */
186 #define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO
187 
188 /*
189  * Bridge locking
190  *
191  * The bridge relies heavily on the epoch(9) system to protect its data
192  * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
193  * must ensure there is only one writer at a time.
194  *
195  * That is: for read accesses we only need to be in NET_EPOCH, but for write
196  * accesses we must hold:
197  *
198  *  - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
199  *  - BRIDGE_LOCK, for any other change
200  *
201  * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
202  * calls to bridge member interfaces and these ioctl()s can sleep.
203  * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
204  * required while we're in NET_EPOCH and then we're not allowed to sleep.
205  */
206 #define BRIDGE_LOCK_INIT(_sc)		do {			\
207 	sx_init(&(_sc)->sc_sx, "if_bridge");			\
208 	mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF);	\
209 } while (0)
210 #define BRIDGE_LOCK_DESTROY(_sc)	do {	\
211 	sx_destroy(&(_sc)->sc_sx);		\
212 	mtx_destroy(&(_sc)->sc_rt_mtx);		\
213 } while (0)
214 #define BRIDGE_LOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
215 #define BRIDGE_UNLOCK(_sc)		sx_xunlock(&(_sc)->sc_sx)
216 #define BRIDGE_LOCK_ASSERT(_sc)		sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
217 #define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
218 	    MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
219 #define BRIDGE_UNLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
220 #define BRIDGE_RT_LOCK(_sc)		mtx_lock(&(_sc)->sc_rt_mtx)
221 #define BRIDGE_RT_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_rt_mtx)
222 #define BRIDGE_RT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
223 #define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
224 	    MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
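
/*
 * A minimal usage sketch of the rules above (identifiers are the ones used
 * elsewhere in this file; the snippet is illustrative only):
 *
 *	struct epoch_tracker et;
 *
 *	NET_EPOCH_ENTER(et);
 *	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
 *		...;				read-only access is safe here
 *	NET_EPOCH_EXIT(et);
 *
 *	BRIDGE_LOCK(sc);			writer: any other change
 *	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
 *	BRIDGE_UNLOCK(sc);
 *
 *	BRIDGE_RT_LOCK(sc);			writer: bridge_rtnodes changes
 *	bridge_rtflush(sc, IFBF_FLUSHALL);
 *	BRIDGE_RT_UNLOCK(sc);
 */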
225 
226 /*
227  * Bridge interface list entry.
228  */
229 struct bridge_iflist {
230 	CK_LIST_ENTRY(bridge_iflist) bif_next;
231 	struct ifnet		*bif_ifp;	/* member if */
232 	struct bstp_port	bif_stp;	/* STP state */
233 	uint32_t		bif_flags;	/* member if flags */
234 	int			bif_savedcaps;	/* saved capabilities */
235 	uint32_t		bif_addrmax;	/* max # of addresses */
236 	uint32_t		bif_addrcnt;	/* cur. # of addresses */
237 	uint32_t		bif_addrexceeded;/* # of address violations */
238 	struct epoch_context	bif_epoch_ctx;
239 };
240 
241 /*
242  * Bridge route node.
243  */
244 struct bridge_rtnode {
245 	CK_LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
246 	CK_LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
247 	struct bridge_iflist	*brt_dst;	/* destination if */
248 	unsigned long		brt_expire;	/* expiration time */
249 	uint8_t			brt_flags;	/* address flags */
250 	uint8_t			brt_addr[ETHER_ADDR_LEN];
251 	uint16_t		brt_vlan;	/* vlan id */
252 	struct	vnet		*brt_vnet;
253 	struct	epoch_context	brt_epoch_ctx;
254 };
255 #define	brt_ifp			brt_dst->bif_ifp
256 
257 /*
258  * Software state for each bridge.
259  */
260 struct bridge_softc {
261 	struct ifnet		*sc_ifp;	/* make this an interface */
262 	LIST_ENTRY(bridge_softc) sc_list;
263 	struct sx		sc_sx;
264 	struct mtx		sc_rt_mtx;
265 	uint32_t		sc_brtmax;	/* max # of addresses */
266 	uint32_t		sc_brtcnt;	/* cur. # of addresses */
267 	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
268 	struct callout		sc_brcallout;	/* bridge callout */
269 	CK_LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
270 	CK_LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
271 	CK_LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
272 	uint32_t		sc_rthash_key;	/* key for hash */
273 	CK_LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
274 	struct bstp_state	sc_stp;		/* STP state */
275 	uint32_t		sc_brtexceeded;	/* # of cache drops */
276 	struct ifnet		*sc_ifaddr;	/* member mac copied from */
277 	struct ether_addr	sc_defaddr;	/* Default MAC address */
278 	struct epoch_context	sc_epoch_ctx;
279 };
280 
281 VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
282 #define	V_bridge_list_sx	VNET(bridge_list_sx)
283 static eventhandler_tag bridge_detach_cookie;
284 
285 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
286 
287 VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
288 #define	V_bridge_rtnode_zone	VNET(bridge_rtnode_zone)
289 
290 static int	bridge_clone_create(struct if_clone *, char *, size_t,
291 		    struct ifc_data *, struct ifnet **);
292 static int	bridge_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
293 
294 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
295 static void	bridge_mutecaps(struct bridge_softc *);
296 static void	bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
297 		    int);
298 static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
299 static void	bridge_init(void *);
300 static void	bridge_dummynet(struct mbuf *, struct ifnet *);
301 static void	bridge_stop(struct ifnet *, int);
302 static int	bridge_transmit(struct ifnet *, struct mbuf *);
303 #ifdef ALTQ
304 static void	bridge_altq_start(if_t);
305 static int	bridge_altq_transmit(if_t, struct mbuf *);
306 #endif
307 static void	bridge_qflush(struct ifnet *);
308 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
309 static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
310 		    struct rtentry *);
311 static int	bridge_enqueue(struct bridge_softc *, struct ifnet *,
312 		    struct mbuf *);
313 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
314 
315 static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
316 		    struct mbuf *m);
317 
318 static void	bridge_timer(void *);
319 
320 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
321 		    struct mbuf *, int);
322 static void	bridge_span(struct bridge_softc *, struct mbuf *);
323 
324 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
325 		    uint16_t, struct bridge_iflist *, int, uint8_t);
326 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
327 		    uint16_t);
328 static void	bridge_rttrim(struct bridge_softc *);
329 static void	bridge_rtage(struct bridge_softc *);
330 static void	bridge_rtflush(struct bridge_softc *, int);
331 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
332 		    uint16_t);
333 
334 static void	bridge_rtable_init(struct bridge_softc *);
335 static void	bridge_rtable_fini(struct bridge_softc *);
336 
337 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
338 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
339 		    const uint8_t *, uint16_t);
340 static int	bridge_rtnode_insert(struct bridge_softc *,
341 		    struct bridge_rtnode *);
342 static void	bridge_rtnode_destroy(struct bridge_softc *,
343 		    struct bridge_rtnode *);
344 static void	bridge_rtable_expire(struct ifnet *, int);
345 static void	bridge_state_change(struct ifnet *, int);
346 
347 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
348 		    const char *name);
349 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
350 		    struct ifnet *ifp);
351 static void	bridge_delete_member(struct bridge_softc *,
352 		    struct bridge_iflist *, int);
353 static void	bridge_delete_span(struct bridge_softc *,
354 		    struct bridge_iflist *);
355 
356 static int	bridge_ioctl_add(struct bridge_softc *, void *);
357 static int	bridge_ioctl_del(struct bridge_softc *, void *);
358 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
359 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
360 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
361 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
362 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
363 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
364 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
365 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
366 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
367 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
368 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
369 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
370 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
371 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
372 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
373 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
374 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
375 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
376 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
377 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
378 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
379 static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
380 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
381 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
382 static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
383 static int	bridge_ioctl_grte(struct bridge_softc *, void *);
384 static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
385 static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
386 static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
387 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
388 		    int);
389 static int	bridge_ip_checkbasic(struct mbuf **mp);
390 #ifdef INET6
391 static int	bridge_ip6_checkbasic(struct mbuf **mp);
392 #endif /* INET6 */
393 static int	bridge_fragment(struct ifnet *, struct mbuf **mp,
394 		    struct ether_header *, int, struct llc *);
395 static void	bridge_linkstate(struct ifnet *ifp);
396 static void	bridge_linkcheck(struct bridge_softc *sc);
397 
398 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
399 #define	VLANTAGOF(_m)	\
400     (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1
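
/*
 * For example (illustrative): a tagged mbuf with M_VLANTAG set and
 * ether_vtag 0x3064 yields EVL_VLANOFTAG(0x3064) == 0x064, i.e. VLAN 100;
 * an untagged mbuf falls back to the default VLAN 1.
 */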
401 
402 static struct bstp_cb_ops bridge_ops = {
403 	.bcb_state = bridge_state_change,
404 	.bcb_rtage = bridge_rtable_expire
405 };
406 
407 SYSCTL_DECL(_net_link);
408 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
409     "Bridge");
410 
411 /* only pass IP[46] packets when pfil is enabled */
412 VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
413 #define	V_pfil_onlyip	VNET(pfil_onlyip)
414 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
415     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
416     "Only pass IP packets when pfil is enabled");
417 
418 /* run pfil hooks on the bridge interface */
419 VNET_DEFINE_STATIC(int, pfil_bridge) = 0;
420 #define	V_pfil_bridge	VNET(pfil_bridge)
421 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
422     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
423     "Packet filter on the bridge interface");
424 
425 /* layer2 filter with ipfw */
426 VNET_DEFINE_STATIC(int, pfil_ipfw);
427 #define	V_pfil_ipfw	VNET(pfil_ipfw)
428 
429 /* layer2 ARP filter with ipfw */
430 VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
431 #define	V_pfil_ipfw_arp	VNET(pfil_ipfw_arp)
432 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
433     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
434     "Filter ARP packets through IPFW layer2");
435 
436 /* run pfil hooks on the member interface */
437 VNET_DEFINE_STATIC(int, pfil_member) = 0;
438 #define	V_pfil_member	VNET(pfil_member)
439 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
440     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
441     "Packet filter on the member interface");
442 
443 /* run pfil hooks on the physical interface for locally destined packets */
444 VNET_DEFINE_STATIC(int, pfil_local_phys);
445 #define	V_pfil_local_phys	VNET(pfil_local_phys)
446 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
447     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
448     "Packet filter on the physical interface for locally destined packets");
449 
450 /* log STP state changes */
451 VNET_DEFINE_STATIC(int, log_stp);
452 #define	V_log_stp	VNET(log_stp)
453 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
454     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
455     "Log STP state changes");
456 
457 /* share MAC with first bridge member */
458 VNET_DEFINE_STATIC(int, bridge_inherit_mac);
459 #define	V_bridge_inherit_mac	VNET(bridge_inherit_mac)
460 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
461     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
462     "Inherit MAC address from the first bridge member");
463 
464 VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
465 #define	V_allow_llz_overlap	VNET(allow_llz_overlap)
466 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
467     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
468     "Allow overlap of link-local scope "
469     "zones of a bridge interface and the member interfaces");
470 
471 struct bridge_control {
472 	int	(*bc_func)(struct bridge_softc *, void *);
473 	int	bc_argsize;
474 	int	bc_flags;
475 };
476 
477 #define	BC_F_COPYIN		0x01	/* copy arguments in */
478 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
479 #define	BC_F_SUSER		0x04	/* do super-user check */
480 
481 static const struct bridge_control bridge_control_table[] = {
482 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
483 	  BC_F_COPYIN|BC_F_SUSER },
484 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
485 	  BC_F_COPYIN|BC_F_SUSER },
486 
487 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
488 	  BC_F_COPYIN|BC_F_COPYOUT },
489 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
490 	  BC_F_COPYIN|BC_F_SUSER },
491 
492 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
493 	  BC_F_COPYIN|BC_F_SUSER },
494 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
495 	  BC_F_COPYOUT },
496 
497 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
498 	  BC_F_COPYIN|BC_F_COPYOUT },
499 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
500 	  BC_F_COPYIN|BC_F_COPYOUT },
501 
502 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
503 	  BC_F_COPYIN|BC_F_SUSER },
504 
505 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
506 	  BC_F_COPYIN|BC_F_SUSER },
507 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
508 	  BC_F_COPYOUT },
509 
510 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
511 	  BC_F_COPYIN|BC_F_SUSER },
512 
513 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
514 	  BC_F_COPYIN|BC_F_SUSER },
515 
516 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
517 	  BC_F_COPYOUT },
518 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
519 	  BC_F_COPYIN|BC_F_SUSER },
520 
521 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
522 	  BC_F_COPYOUT },
523 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
524 	  BC_F_COPYIN|BC_F_SUSER },
525 
526 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
527 	  BC_F_COPYOUT },
528 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
529 	  BC_F_COPYIN|BC_F_SUSER },
530 
531 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
532 	  BC_F_COPYOUT },
533 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
534 	  BC_F_COPYIN|BC_F_SUSER },
535 
536 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
537 	  BC_F_COPYIN|BC_F_SUSER },
538 
539 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
540 	  BC_F_COPYIN|BC_F_SUSER },
541 
542 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
543 	  BC_F_COPYIN|BC_F_SUSER },
544 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
545 	  BC_F_COPYIN|BC_F_SUSER },
546 
547 	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
548 	  BC_F_COPYOUT },
549 
550 	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
551 	  BC_F_COPYOUT },
552 
553 	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
554 	  BC_F_COPYIN|BC_F_COPYOUT },
555 
556 	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
557 	  BC_F_COPYIN|BC_F_SUSER },
558 
559 	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
560 	  BC_F_COPYIN|BC_F_SUSER },
561 
562 	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
563 	  BC_F_COPYIN|BC_F_SUSER },
564 
565 };
566 static const int bridge_control_table_size = nitems(bridge_control_table);
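
/*
 * A rough sketch of how userland reaches this table ("bridge0" and "em0"
 * are placeholder names; the BRDG* command values from net/if_bridgevar.h
 * index the table in order, and s is any socket descriptor):
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 *
 * bridge_ioctl() validates ifd_cmd and ifd_len, copies the argument in
 * and/or out according to the BC_F_* flags, and then calls the bc_func
 * entry, bridge_ioctl_add() in this case.
 */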
567 
568 VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list);
569 #define	V_bridge_list	VNET(bridge_list)
570 #define	BRIDGE_LIST_LOCK_INIT(x)	sx_init(&V_bridge_list_sx,	\
571 					    "if_bridge list")
572 #define	BRIDGE_LIST_LOCK_DESTROY(x)	sx_destroy(&V_bridge_list_sx)
573 #define	BRIDGE_LIST_LOCK(x)		sx_xlock(&V_bridge_list_sx)
574 #define	BRIDGE_LIST_UNLOCK(x)		sx_xunlock(&V_bridge_list_sx)
575 
576 VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
577 #define	V_bridge_cloner	VNET(bridge_cloner)
578 
579 static const char bridge_name[] = "bridge";
580 
581 static void
582 vnet_bridge_init(const void *unused __unused)
583 {
584 
585 	V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
586 	    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
587 	    UMA_ALIGN_PTR, 0);
588 	BRIDGE_LIST_LOCK_INIT();
589 	LIST_INIT(&V_bridge_list);
590 
591 	struct if_clone_addreq req = {
592 		.create_f = bridge_clone_create,
593 		.destroy_f = bridge_clone_destroy,
594 		.flags = IFC_F_AUTOUNIT,
595 	};
596 	V_bridge_cloner = ifc_attach_cloner(bridge_name, &req);
597 }
598 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
599     vnet_bridge_init, NULL);
600 
601 static void
602 vnet_bridge_uninit(const void *unused __unused)
603 {
604 
605 	ifc_detach_cloner(V_bridge_cloner);
606 	V_bridge_cloner = NULL;
607 	BRIDGE_LIST_LOCK_DESTROY();
608 
609 	/* Callbacks may use the UMA zone. */
610 	NET_EPOCH_DRAIN_CALLBACKS();
611 
612 	uma_zdestroy(V_bridge_rtnode_zone);
613 }
614 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
615     vnet_bridge_uninit, NULL);
616 
617 static int
618 bridge_modevent(module_t mod, int type, void *data)
619 {
620 
621 	switch (type) {
622 	case MOD_LOAD:
623 		bridge_dn_p = bridge_dummynet;
624 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
625 		    ifnet_departure_event, bridge_ifdetach, NULL,
626 		    EVENTHANDLER_PRI_ANY);
627 		break;
628 	case MOD_UNLOAD:
629 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
630 		    bridge_detach_cookie);
631 		bridge_dn_p = NULL;
632 		break;
633 	default:
634 		return (EOPNOTSUPP);
635 	}
636 	return (0);
637 }
638 
639 static moduledata_t bridge_mod = {
640 	"if_bridge",
641 	bridge_modevent,
642 	0
643 };
644 
645 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
646 MODULE_VERSION(if_bridge, 1);
647 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
648 
649 /*
650  * handler for net.link.bridge.ipfw
651  */
652 static int
653 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
654 {
655 	int enable = V_pfil_ipfw;
656 	int error;
657 
658 	error = sysctl_handle_int(oidp, &enable, 0, req);
659 	enable &= 1;
660 
661 	if (enable != V_pfil_ipfw) {
662 		V_pfil_ipfw = enable;
663 
664 		/*
665 		 * Disable pfil so that ipfw doesn't run twice.  If the user
666 		 * really wants both, they can re-enable pfil_bridge and/or
667 		 * pfil_member.  Also allow non-IP packets, as ipfw can filter by
668 		 * layer2 type.
669 		 */
670 		if (V_pfil_ipfw) {
671 			V_pfil_onlyip = 0;
672 			V_pfil_bridge = 0;
673 			V_pfil_member = 0;
674 		}
675 	}
676 
677 	return (error);
678 }
679 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
680     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
681     &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
682     "Layer2 filter with IPFW");
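
/*
 * For example (illustrative): "sysctl net.link.bridge.ipfw=1" enables
 * layer2 filtering with ipfw and, as the handler above shows, clears
 * pfil_onlyip, pfil_bridge and pfil_member as a side effect.
 */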
683 
684 #ifdef VIMAGE
685 static void
686 bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
687 {
688 	struct bridge_softc *sc = ifp->if_softc;
689 	struct bridge_iflist *bif;
690 
691 	BRIDGE_LOCK(sc);
692 
693 	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
694 		bridge_delete_member(sc, bif, 0);
695 
696 	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
697 		bridge_delete_span(sc, bif);
698 	}
699 
700 	BRIDGE_UNLOCK(sc);
701 
702 	ether_reassign(ifp, newvnet, arg);
703 }
704 #endif
705 
706 /*
707  * bridge_clone_create:
708  *
709  *	Create a new bridge instance.
710  */
711 static int
712 bridge_clone_create(struct if_clone *ifc, char *name, size_t len,
713     struct ifc_data *ifd, struct ifnet **ifpp)
714 {
715 	struct bridge_softc *sc;
716 	struct ifnet *ifp;
717 
718 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
719 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
720 	if (ifp == NULL) {
721 		free(sc, M_DEVBUF);
722 		return (ENOSPC);
723 	}
724 
725 	BRIDGE_LOCK_INIT(sc);
726 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
727 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
728 
729 	/* Initialize our routing table. */
730 	bridge_rtable_init(sc);
731 
732 	callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
733 
734 	CK_LIST_INIT(&sc->sc_iflist);
735 	CK_LIST_INIT(&sc->sc_spanlist);
736 
737 	ifp->if_softc = sc;
738 	if_initname(ifp, bridge_name, ifd->unit);
739 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
740 	ifp->if_ioctl = bridge_ioctl;
741 #ifdef ALTQ
742 	ifp->if_start = bridge_altq_start;
743 	ifp->if_transmit = bridge_altq_transmit;
744 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
745 	ifp->if_snd.ifq_drv_maxlen = 0;
746 	IFQ_SET_READY(&ifp->if_snd);
747 #else
748 	ifp->if_transmit = bridge_transmit;
749 #endif
750 	ifp->if_qflush = bridge_qflush;
751 	ifp->if_init = bridge_init;
752 	ifp->if_type = IFT_BRIDGE;
753 
754 	ether_gen_addr(ifp, &sc->sc_defaddr);
755 
756 	bstp_attach(&sc->sc_stp, &bridge_ops);
757 	ether_ifattach(ifp, sc->sc_defaddr.octet);
758 	/* Now undo some of the damage... */
759 	ifp->if_baudrate = 0;
760 	ifp->if_type = IFT_BRIDGE;
761 #ifdef VIMAGE
762 	ifp->if_reassign = bridge_reassign;
763 #endif
764 
765 	BRIDGE_LIST_LOCK();
766 	LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
767 	BRIDGE_LIST_UNLOCK();
768 	*ifpp = ifp;
769 
770 	return (0);
771 }
772 
773 static void
774 bridge_clone_destroy_cb(struct epoch_context *ctx)
775 {
776 	struct bridge_softc *sc;
777 
778 	sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
779 
780 	BRIDGE_LOCK_DESTROY(sc);
781 	free(sc, M_DEVBUF);
782 }
783 
784 /*
785  * bridge_clone_destroy:
786  *
787  *	Destroy a bridge instance.
788  */
789 static int
790 bridge_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
791 {
792 	struct bridge_softc *sc = ifp->if_softc;
793 	struct bridge_iflist *bif;
794 	struct epoch_tracker et;
795 
796 	BRIDGE_LOCK(sc);
797 
798 	bridge_stop(ifp, 1);
799 	ifp->if_flags &= ~IFF_UP;
800 
801 	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
802 		bridge_delete_member(sc, bif, 0);
803 
804 	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
805 		bridge_delete_span(sc, bif);
806 	}
807 
808 	/* Tear down the routing table. */
809 	bridge_rtable_fini(sc);
810 
811 	BRIDGE_UNLOCK(sc);
812 
813 	NET_EPOCH_ENTER(et);
814 
815 	callout_drain(&sc->sc_brcallout);
816 
817 	BRIDGE_LIST_LOCK();
818 	LIST_REMOVE(sc, sc_list);
819 	BRIDGE_LIST_UNLOCK();
820 
821 	bstp_detach(&sc->sc_stp);
822 #ifdef ALTQ
823 	IFQ_PURGE(&ifp->if_snd);
824 #endif
825 	NET_EPOCH_EXIT(et);
826 
827 	ether_ifdetach(ifp);
828 	if_free(ifp);
829 
830 	NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
831 
832 	return (0);
833 }
834 
835 /*
836  * bridge_ioctl:
837  *
838  *	Handle a control request from the operator.
839  */
840 static int
841 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
842 {
843 	struct bridge_softc *sc = ifp->if_softc;
844 	struct ifreq *ifr = (struct ifreq *)data;
845 	struct bridge_iflist *bif;
846 	struct thread *td = curthread;
847 	union {
848 		struct ifbreq ifbreq;
849 		struct ifbifconf ifbifconf;
850 		struct ifbareq ifbareq;
851 		struct ifbaconf ifbaconf;
852 		struct ifbrparam ifbrparam;
853 		struct ifbropreq ifbropreq;
854 	} args;
855 	struct ifdrv *ifd = (struct ifdrv *) data;
856 	const struct bridge_control *bc;
857 	int error = 0, oldmtu;
858 
859 	BRIDGE_LOCK(sc);
860 
861 	switch (cmd) {
862 	case SIOCADDMULTI:
863 	case SIOCDELMULTI:
864 		break;
865 
866 	case SIOCGDRVSPEC:
867 	case SIOCSDRVSPEC:
868 		if (ifd->ifd_cmd >= bridge_control_table_size) {
869 			error = EINVAL;
870 			break;
871 		}
872 		bc = &bridge_control_table[ifd->ifd_cmd];
873 
874 		if (cmd == SIOCGDRVSPEC &&
875 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
876 			error = EINVAL;
877 			break;
878 		}
879 		else if (cmd == SIOCSDRVSPEC &&
880 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
881 			error = EINVAL;
882 			break;
883 		}
884 
885 		if (bc->bc_flags & BC_F_SUSER) {
886 			error = priv_check(td, PRIV_NET_BRIDGE);
887 			if (error)
888 				break;
889 		}
890 
891 		if (ifd->ifd_len != bc->bc_argsize ||
892 		    ifd->ifd_len > sizeof(args)) {
893 			error = EINVAL;
894 			break;
895 		}
896 
897 		bzero(&args, sizeof(args));
898 		if (bc->bc_flags & BC_F_COPYIN) {
899 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
900 			if (error)
901 				break;
902 		}
903 
904 		oldmtu = ifp->if_mtu;
905 		error = (*bc->bc_func)(sc, &args);
906 		if (error)
907 			break;
908 
909 		/*
910 		 * Bridge MTU may change during addition of the first port.
911 		 * If it did, perform the network-layer-specific updates.
912 		 */
913 		if (ifp->if_mtu != oldmtu) {
914 #ifdef INET6
915 			nd6_setmtu(ifp);
916 #endif
917 			rt_updatemtu(ifp);
918 		}
919 
920 		if (bc->bc_flags & BC_F_COPYOUT)
921 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
922 
923 		break;
924 
925 	case SIOCSIFFLAGS:
926 		if (!(ifp->if_flags & IFF_UP) &&
927 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
928 			/*
929 			 * If interface is marked down and it is running,
930 			 * then stop and disable it.
931 			 */
932 			bridge_stop(ifp, 1);
933 		} else if ((ifp->if_flags & IFF_UP) &&
934 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
935 			/*
936 			 * If interface is marked up and it is stopped, then
937 			 * start it.
938 			 */
939 			BRIDGE_UNLOCK(sc);
940 			(*ifp->if_init)(sc);
941 			BRIDGE_LOCK(sc);
942 		}
943 		break;
944 
945 	case SIOCSIFMTU:
946 		oldmtu = sc->sc_ifp->if_mtu;
947 
948 		if (ifr->ifr_mtu < 576) {
949 			error = EINVAL;
950 			break;
951 		}
952 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
953 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
954 			break;
955 		}
956 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
957 			error = (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
958 			    SIOCSIFMTU, (caddr_t)ifr);
959 			if (error != 0) {
960 				log(LOG_NOTICE, "%s: invalid MTU: %u for"
961 				    " member %s\n", sc->sc_ifp->if_xname,
962 				    ifr->ifr_mtu,
963 				    bif->bif_ifp->if_xname);
964 				error = EINVAL;
965 				break;
966 			}
967 		}
968 		if (error) {
969 			/* Restore the previous MTU on all member interfaces. */
970 			ifr->ifr_mtu = oldmtu;
971 			CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
972 				(*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
973 				    SIOCSIFMTU, (caddr_t)ifr);
974 			}
975 		} else {
976 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
977 		}
978 		break;
979 	default:
980 		/*
981 		 * drop the lock as ether_ioctl() will call bridge_start() and
982 		 * cause the lock to be recursed.
983 		 */
984 		BRIDGE_UNLOCK(sc);
985 		error = ether_ioctl(ifp, cmd, data);
986 		BRIDGE_LOCK(sc);
987 		break;
988 	}
989 
990 	BRIDGE_UNLOCK(sc);
991 
992 	return (error);
993 }
994 
995 /*
996  * bridge_mutecaps:
997  *
998  *	Clear or restore unwanted capabilities on the member interface
999  */
1000 static void
1001 bridge_mutecaps(struct bridge_softc *sc)
1002 {
1003 	struct bridge_iflist *bif;
1004 	int enabled, mask;
1005 
1006 	BRIDGE_LOCK_ASSERT(sc);
1007 
1008 	/* Initial bitmask of capabilities to test */
1009 	mask = BRIDGE_IFCAPS_MASK;
1010 
1011 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1012 		/* Every member must support it or it's disabled */
1013 		mask &= bif->bif_savedcaps;
1014 	}
1015 
1016 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1017 		enabled = bif->bif_ifp->if_capenable;
1018 		enabled &= ~BRIDGE_IFCAPS_STRIP;
1019 		/* strip off mask bits and enable them again if allowed */
1020 		enabled &= ~BRIDGE_IFCAPS_MASK;
1021 		enabled |= mask;
1022 		bridge_set_ifcap(sc, bif, enabled);
1023 	}
1024 }
1025 
1026 static void
1027 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1028 {
1029 	struct ifnet *ifp = bif->bif_ifp;
1030 	struct ifreq ifr;
1031 	int error, mask, stuck;
1032 
1033 	bzero(&ifr, sizeof(ifr));
1034 	ifr.ifr_reqcap = set;
1035 
1036 	if (ifp->if_capenable != set) {
1037 		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1038 		if (error)
1039 			if_printf(sc->sc_ifp,
1040 			    "error setting capabilities on %s: %d\n",
1041 			    ifp->if_xname, error);
1042 		mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
1043 		stuck = ifp->if_capenable & mask & ~set;
1044 		if (stuck != 0)
1045 			if_printf(sc->sc_ifp,
1046 			    "can't disable some capabilities on %s: 0x%x\n",
1047 			    ifp->if_xname, stuck);
1048 	}
1049 }
1050 
1051 /*
1052  * bridge_lookup_member:
1053  *
1054  *	Lookup a bridge member interface.
1055  */
1056 static struct bridge_iflist *
1057 bridge_lookup_member(struct bridge_softc *sc, const char *name)
1058 {
1059 	struct bridge_iflist *bif;
1060 	struct ifnet *ifp;
1061 
1062 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1063 
1064 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1065 		ifp = bif->bif_ifp;
1066 		if (strcmp(ifp->if_xname, name) == 0)
1067 			return (bif);
1068 	}
1069 
1070 	return (NULL);
1071 }
1072 
1073 /*
1074  * bridge_lookup_member_if:
1075  *
1076  *	Lookup a bridge member interface by ifnet*.
1077  */
1078 static struct bridge_iflist *
1079 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1080 {
1081 	struct bridge_iflist *bif;
1082 
1083 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1084 
1085 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1086 		if (bif->bif_ifp == member_ifp)
1087 			return (bif);
1088 	}
1089 
1090 	return (NULL);
1091 }
1092 
1093 static void
1094 bridge_delete_member_cb(struct epoch_context *ctx)
1095 {
1096 	struct bridge_iflist *bif;
1097 
1098 	bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1099 
1100 	free(bif, M_DEVBUF);
1101 }
1102 
1103 /*
1104  * bridge_delete_member:
1105  *
1106  *	Delete the specified member interface.
1107  */
1108 static void
1109 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1110     int gone)
1111 {
1112 	struct ifnet *ifs = bif->bif_ifp;
1113 	struct ifnet *fif = NULL;
1114 	struct bridge_iflist *bifl;
1115 
1116 	BRIDGE_LOCK_ASSERT(sc);
1117 
1118 	if (bif->bif_flags & IFBIF_STP)
1119 		bstp_disable(&bif->bif_stp);
1120 
1121 	ifs->if_bridge = NULL;
1122 	CK_LIST_REMOVE(bif, bif_next);
1123 
1124 	/*
1125 	 * If removing the interface that gave the bridge its mac address, set
1126 	 * the mac address of the bridge to the address of the next member, or
1127 	 * to its default address if no members are left.
1128 	 */
1129 	if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1130 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1131 			bcopy(&sc->sc_defaddr,
1132 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1133 			sc->sc_ifaddr = NULL;
1134 		} else {
1135 			bifl = CK_LIST_FIRST(&sc->sc_iflist);
1136 			fif = bifl->bif_ifp;
1137 			bcopy(IF_LLADDR(fif),
1138 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1139 			sc->sc_ifaddr = fif;
1140 		}
1141 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1142 	}
1143 
1144 	bridge_linkcheck(sc);
1145 	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
1146 	BRIDGE_RT_LOCK(sc);
1147 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1148 	BRIDGE_RT_UNLOCK(sc);
1149 	KASSERT(bif->bif_addrcnt == 0,
1150 	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1151 
1152 	ifs->if_bridge_output = NULL;
1153 	ifs->if_bridge_input = NULL;
1154 	ifs->if_bridge_linkstate = NULL;
1155 	if (!gone) {
1156 		switch (ifs->if_type) {
1157 		case IFT_ETHER:
1158 		case IFT_L2VLAN:
1159 			/*
1160 			 * Take the interface out of promiscuous mode, but only
1161 			 * if it was promiscuous in the first place. It might
1162 			 * not be if we're in the bridge_ioctl_add() error path.
1163 			 */
1164 			if (ifs->if_flags & IFF_PROMISC)
1165 				(void) ifpromisc(ifs, 0);
1166 			break;
1167 
1168 		case IFT_GIF:
1169 			break;
1170 
1171 		default:
1172 #ifdef DIAGNOSTIC
1173 			panic("bridge_delete_member: impossible");
1174 #endif
1175 			break;
1176 		}
1177 		/* re-enable any interface capabilities */
1178 		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1179 	}
1180 	bstp_destroy(&bif->bif_stp);	/* prepare to free */
1181 
1182 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1183 }
1184 
1185 /*
1186  * bridge_delete_span:
1187  *
1188  *	Delete the specified span interface.
1189  */
1190 static void
1191 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1192 {
1193 	BRIDGE_LOCK_ASSERT(sc);
1194 
1195 	KASSERT(bif->bif_ifp->if_bridge == NULL,
1196 	    ("%s: not a span interface", __func__));
1197 
1198 	CK_LIST_REMOVE(bif, bif_next);
1199 
1200 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1201 }
1202 
1203 static int
1204 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1205 {
1206 	struct ifbreq *req = arg;
1207 	struct bridge_iflist *bif = NULL;
1208 	struct ifnet *ifs;
1209 	int error = 0;
1210 
1211 	ifs = ifunit(req->ifbr_ifsname);
1212 	if (ifs == NULL)
1213 		return (ENOENT);
1214 	if (ifs->if_ioctl == NULL)	/* must be supported */
1215 		return (EINVAL);
1216 
1217 	/* If it's in the span list, it can't be a member. */
1218 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1219 		if (ifs == bif->bif_ifp)
1220 			return (EBUSY);
1221 
1222 	if (ifs->if_bridge == sc)
1223 		return (EEXIST);
1224 
1225 	if (ifs->if_bridge != NULL)
1226 		return (EBUSY);
1227 
1228 	switch (ifs->if_type) {
1229 	case IFT_ETHER:
1230 	case IFT_L2VLAN:
1231 	case IFT_GIF:
1232 		/* permitted interface types */
1233 		break;
1234 	default:
1235 		return (EINVAL);
1236 	}
1237 
1238 #ifdef INET6
1239 	/*
1240 	 * Two valid inet6 addresses with link-local scope must not be
1241 	 * configured on the parent interface and on a member interface
1242 	 * at the same time.  This restriction is needed to prevent a
1243 	 * violation of the link-local scope zone.  Adding a member
1244 	 * interface that has inet6 addresses when the parent already
1245 	 * has inet6 configured triggers removal of all inet6 addresses
1246 	 * on the member interface.
1247 	 */
1248 
1249 	/* Check if the parent interface has a link-local scope addr. */
1250 	if (V_allow_llz_overlap == 0 &&
1251 	    in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1252 		/*
1253 		 * If any, remove all inet6 addresses from the member
1254 		 * interfaces.
1255 		 */
1256 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1257  			if (in6ifa_llaonifp(bif->bif_ifp)) {
1258 				in6_ifdetach(bif->bif_ifp);
1259 				if_printf(sc->sc_ifp,
1260 				    "IPv6 addresses on %s have been removed "
1261 				    "before adding it as a member to prevent "
1262 				    "IPv6 address scope violation.\n",
1263 				    bif->bif_ifp->if_xname);
1264 			}
1265 		}
1266 		if (in6ifa_llaonifp(ifs)) {
1267 			in6_ifdetach(ifs);
1268 			if_printf(sc->sc_ifp,
1269 			    "IPv6 addresses on %s have been removed "
1270 			    "before adding it as a member to prevent "
1271 			    "IPv6 address scope violation.\n",
1272 			    ifs->if_xname);
1273 		}
1274 	}
1275 #endif
1276 	/* Allow the first Ethernet member to define the MTU */
1277 	if (CK_LIST_EMPTY(&sc->sc_iflist))
1278 		sc->sc_ifp->if_mtu = ifs->if_mtu;
1279 	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1280 		struct ifreq ifr;
1281 
1282 		snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
1283 		    ifs->if_xname);
1284 		ifr.ifr_mtu = sc->sc_ifp->if_mtu;
1285 
1286 		error = (*ifs->if_ioctl)(ifs,
1287 		    SIOCSIFMTU, (caddr_t)&ifr);
1288 		if (error != 0) {
1289 			log(LOG_NOTICE, "%s: invalid MTU: %u for"
1290 			    " new member %s\n", sc->sc_ifp->if_xname,
1291 			    ifr.ifr_mtu,
1292 			    ifs->if_xname);
1293 			return (EINVAL);
1294 		}
1295 	}
1296 
1297 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1298 	if (bif == NULL)
1299 		return (ENOMEM);
1300 
1301 	bif->bif_ifp = ifs;
1302 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1303 	bif->bif_savedcaps = ifs->if_capenable;
1304 
1305 	/*
1306 	 * Assign the interface's MAC address to the bridge if it's the first
1307 	 * member and the MAC address of the bridge has not been changed from
1308 	 * the default randomly generated one.
1309 	 */
1310 	if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1311 	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1312 		bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1313 		sc->sc_ifaddr = ifs;
1314 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1315 	}
1316 
1317 	ifs->if_bridge = sc;
1318 	ifs->if_bridge_output = bridge_output;
1319 	ifs->if_bridge_input = bridge_input;
1320 	ifs->if_bridge_linkstate = bridge_linkstate;
1321 	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1322 	/*
1323 	 * XXX: XLOCK HERE!?!
1324 	 *
1325 	 * NOTE: insert_***HEAD*** should be safe for the traversals.
1326 	 */
1327 	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1328 
1329 	/* Set interface capabilities to the intersection of the members' capabilities */
1330 	bridge_mutecaps(sc);
1331 	bridge_linkcheck(sc);
1332 
1333 	/* Place the interface into promiscuous mode */
1334 	switch (ifs->if_type) {
1335 		case IFT_ETHER:
1336 		case IFT_L2VLAN:
1337 			error = ifpromisc(ifs, 1);
1338 			break;
1339 	}
1340 
1341 	if (error)
1342 		bridge_delete_member(sc, bif, 0);
1343 	return (error);
1344 }
1345 
1346 static int
1347 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1348 {
1349 	struct ifbreq *req = arg;
1350 	struct bridge_iflist *bif;
1351 
1352 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1353 	if (bif == NULL)
1354 		return (ENOENT);
1355 
1356 	bridge_delete_member(sc, bif, 0);
1357 
1358 	return (0);
1359 }
1360 
1361 static int
1362 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1363 {
1364 	struct ifbreq *req = arg;
1365 	struct bridge_iflist *bif;
1366 	struct bstp_port *bp;
1367 
1368 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1369 	if (bif == NULL)
1370 		return (ENOENT);
1371 
1372 	bp = &bif->bif_stp;
1373 	req->ifbr_ifsflags = bif->bif_flags;
1374 	req->ifbr_state = bp->bp_state;
1375 	req->ifbr_priority = bp->bp_priority;
1376 	req->ifbr_path_cost = bp->bp_path_cost;
1377 	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1378 	req->ifbr_proto = bp->bp_protover;
1379 	req->ifbr_role = bp->bp_role;
1380 	req->ifbr_stpflags = bp->bp_flags;
1381 	req->ifbr_addrcnt = bif->bif_addrcnt;
1382 	req->ifbr_addrmax = bif->bif_addrmax;
1383 	req->ifbr_addrexceeded = bif->bif_addrexceeded;
1384 
1385 	/* Copy STP state options as flags */
1386 	if (bp->bp_operedge)
1387 		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1388 	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1389 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1390 	if (bp->bp_ptp_link)
1391 		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1392 	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1393 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1394 	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1395 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1396 	if (bp->bp_flags & BSTP_PORT_ADMCOST)
1397 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1398 	return (0);
1399 }
1400 
1401 static int
1402 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1403 {
1404 	struct epoch_tracker et;
1405 	struct ifbreq *req = arg;
1406 	struct bridge_iflist *bif;
1407 	struct bstp_port *bp;
1408 	int error;
1409 
1410 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1411 	if (bif == NULL)
1412 		return (ENOENT);
1413 	bp = &bif->bif_stp;
1414 
1415 	if (req->ifbr_ifsflags & IFBIF_SPAN)
1416 		/* SPAN is readonly */
1417 		return (EINVAL);
1418 
1419 	NET_EPOCH_ENTER(et);
1420 
1421 	if (req->ifbr_ifsflags & IFBIF_STP) {
1422 		if ((bif->bif_flags & IFBIF_STP) == 0) {
1423 			error = bstp_enable(&bif->bif_stp);
1424 			if (error) {
1425 				NET_EPOCH_EXIT(et);
1426 				return (error);
1427 			}
1428 		}
1429 	} else {
1430 		if ((bif->bif_flags & IFBIF_STP) != 0)
1431 			bstp_disable(&bif->bif_stp);
1432 	}
1433 
1434 	/* Pass on STP flags */
1435 	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1436 	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1437 	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1438 	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1439 
1440 	/* Save the bits relating to the bridge */
1441 	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1442 
1443 	NET_EPOCH_EXIT(et);
1444 
1445 	return (0);
1446 }
1447 
1448 static int
1449 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1450 {
1451 	struct ifbrparam *param = arg;
1452 
1453 	sc->sc_brtmax = param->ifbrp_csize;
1454 	bridge_rttrim(sc);
1455 
1456 	return (0);
1457 }
1458 
1459 static int
1460 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1461 {
1462 	struct ifbrparam *param = arg;
1463 
1464 	param->ifbrp_csize = sc->sc_brtmax;
1465 
1466 	return (0);
1467 }
1468 
1469 static int
1470 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1471 {
1472 	struct ifbifconf *bifc = arg;
1473 	struct bridge_iflist *bif;
1474 	struct ifbreq breq;
1475 	char *buf, *outbuf;
1476 	int count, buflen, len, error = 0;
1477 
1478 	count = 0;
1479 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1480 		count++;
1481 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1482 		count++;
1483 
1484 	buflen = sizeof(breq) * count;
1485 	if (bifc->ifbic_len == 0) {
1486 		bifc->ifbic_len = buflen;
1487 		return (0);
1488 	}
1489 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1490 	if (outbuf == NULL)
1491 		return (ENOMEM);
1492 
1493 	count = 0;
1494 	buf = outbuf;
1495 	len = min(bifc->ifbic_len, buflen);
1496 	bzero(&breq, sizeof(breq));
1497 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1498 		if (len < sizeof(breq))
1499 			break;
1500 
1501 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1502 		    sizeof(breq.ifbr_ifsname));
1503 		/* Fill in the ifbreq structure */
1504 		error = bridge_ioctl_gifflags(sc, &breq);
1505 		if (error)
1506 			break;
1507 		memcpy(buf, &breq, sizeof(breq));
1508 		count++;
1509 		buf += sizeof(breq);
1510 		len -= sizeof(breq);
1511 	}
1512 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1513 		if (len < sizeof(breq))
1514 			break;
1515 
1516 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1517 		    sizeof(breq.ifbr_ifsname));
1518 		breq.ifbr_ifsflags = bif->bif_flags;
1519 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1520 		memcpy(buf, &breq, sizeof(breq));
1521 		count++;
1522 		buf += sizeof(breq);
1523 		len -= sizeof(breq);
1524 	}
1525 
1526 	bifc->ifbic_len = sizeof(breq) * count;
1527 	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1528 	free(outbuf, M_TEMP);
1529 	return (error);
1530 }
1531 
1532 static int
1533 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1534 {
1535 	struct ifbaconf *bac = arg;
1536 	struct bridge_rtnode *brt;
1537 	struct ifbareq bareq;
1538 	char *buf, *outbuf;
1539 	int count, buflen, len, error = 0;
1540 
1541 	if (bac->ifbac_len == 0)
1542 		return (0);
1543 
1544 	count = 0;
1545 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1546 		count++;
1547 	buflen = sizeof(bareq) * count;
1548 
1549 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1550 	if (outbuf == NULL)
1551 		return (ENOMEM);
1552 
1553 	count = 0;
1554 	buf = outbuf;
1555 	len = min(bac->ifbac_len, buflen);
1556 	bzero(&bareq, sizeof(bareq));
1557 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1558 		if (len < sizeof(bareq))
1559 			goto out;
1560 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1561 		    sizeof(bareq.ifba_ifsname));
1562 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1563 		bareq.ifba_vlan = brt->brt_vlan;
1564 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1565 				time_uptime < brt->brt_expire)
1566 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1567 		else
1568 			bareq.ifba_expire = 0;
1569 		bareq.ifba_flags = brt->brt_flags;
1570 
1571 		memcpy(buf, &bareq, sizeof(bareq));
1572 		count++;
1573 		buf += sizeof(bareq);
1574 		len -= sizeof(bareq);
1575 	}
1576 out:
1577 	bac->ifbac_len = sizeof(bareq) * count;
1578 	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1579 	free(outbuf, M_TEMP);
1580 	return (error);
1581 }
1582 
1583 static int
1584 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1585 {
1586 	struct ifbareq *req = arg;
1587 	struct bridge_iflist *bif;
1588 	struct epoch_tracker et;
1589 	int error;
1590 
1591 	NET_EPOCH_ENTER(et);
1592 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1593 	if (bif == NULL) {
1594 		NET_EPOCH_EXIT(et);
1595 		return (ENOENT);
1596 	}
1597 
1598 	/* bridge_rtupdate() may acquire the lock. */
1599 	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1600 	    req->ifba_flags);
1601 	NET_EPOCH_EXIT(et);
1602 
1603 	return (error);
1604 }
1605 
1606 static int
1607 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1608 {
1609 	struct ifbrparam *param = arg;
1610 
1611 	sc->sc_brttimeout = param->ifbrp_ctime;
1612 	return (0);
1613 }
1614 
1615 static int
1616 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1617 {
1618 	struct ifbrparam *param = arg;
1619 
1620 	param->ifbrp_ctime = sc->sc_brttimeout;
1621 	return (0);
1622 }
1623 
1624 static int
1625 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1626 {
1627 	struct ifbareq *req = arg;
1628 
1629 	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
1630 }
1631 
1632 static int
1633 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1634 {
1635 	struct ifbreq *req = arg;
1636 
1637 	BRIDGE_RT_LOCK(sc);
1638 	bridge_rtflush(sc, req->ifbr_ifsflags);
1639 	BRIDGE_RT_UNLOCK(sc);
1640 
1641 	return (0);
1642 }
1643 
1644 static int
1645 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1646 {
1647 	struct ifbrparam *param = arg;
1648 	struct bstp_state *bs = &sc->sc_stp;
1649 
1650 	param->ifbrp_prio = bs->bs_bridge_priority;
1651 	return (0);
1652 }
1653 
1654 static int
1655 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1656 {
1657 	struct ifbrparam *param = arg;
1658 
1659 	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1660 }
1661 
1662 static int
1663 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1664 {
1665 	struct ifbrparam *param = arg;
1666 	struct bstp_state *bs = &sc->sc_stp;
1667 
1668 	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1669 	return (0);
1670 }
1671 
1672 static int
1673 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1674 {
1675 	struct ifbrparam *param = arg;
1676 
1677 	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1678 }
1679 
1680 static int
1681 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1682 {
1683 	struct ifbrparam *param = arg;
1684 	struct bstp_state *bs = &sc->sc_stp;
1685 
1686 	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1687 	return (0);
1688 }
1689 
1690 static int
1691 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1692 {
1693 	struct ifbrparam *param = arg;
1694 
1695 	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1696 }
1697 
1698 static int
1699 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1700 {
1701 	struct ifbrparam *param = arg;
1702 	struct bstp_state *bs = &sc->sc_stp;
1703 
1704 	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1705 	return (0);
1706 }
1707 
1708 static int
1709 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1710 {
1711 	struct ifbrparam *param = arg;
1712 
1713 	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1714 }
1715 
1716 static int
1717 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1718 {
1719 	struct ifbreq *req = arg;
1720 	struct bridge_iflist *bif;
1721 
1722 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1723 	if (bif == NULL)
1724 		return (ENOENT);
1725 
1726 	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1727 }
1728 
1729 static int
1730 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1731 {
1732 	struct ifbreq *req = arg;
1733 	struct bridge_iflist *bif;
1734 
1735 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1736 	if (bif == NULL)
1737 		return (ENOENT);
1738 
1739 	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1740 }
1741 
1742 static int
1743 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1744 {
1745 	struct ifbreq *req = arg;
1746 	struct bridge_iflist *bif;
1747 
1748 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1749 	if (bif == NULL)
1750 		return (ENOENT);
1751 
1752 	bif->bif_addrmax = req->ifbr_addrmax;
1753 	return (0);
1754 }
1755 
1756 static int
1757 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1758 {
1759 	struct ifbreq *req = arg;
1760 	struct bridge_iflist *bif = NULL;
1761 	struct ifnet *ifs;
1762 
1763 	ifs = ifunit(req->ifbr_ifsname);
1764 	if (ifs == NULL)
1765 		return (ENOENT);
1766 
1767 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1768 		if (ifs == bif->bif_ifp)
1769 			return (EBUSY);
1770 
1771 	if (ifs->if_bridge != NULL)
1772 		return (EBUSY);
1773 
1774 	switch (ifs->if_type) {
1775 		case IFT_ETHER:
1776 		case IFT_GIF:
1777 		case IFT_L2VLAN:
1778 			break;
1779 		default:
1780 			return (EINVAL);
1781 	}
1782 
1783 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1784 	if (bif == NULL)
1785 		return (ENOMEM);
1786 
1787 	bif->bif_ifp = ifs;
1788 	bif->bif_flags = IFBIF_SPAN;
1789 
1790 	CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1791 
1792 	return (0);
1793 }
1794 
1795 static int
1796 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1797 {
1798 	struct ifbreq *req = arg;
1799 	struct bridge_iflist *bif;
1800 	struct ifnet *ifs;
1801 
1802 	ifs = ifunit(req->ifbr_ifsname);
1803 	if (ifs == NULL)
1804 		return (ENOENT);
1805 
1806 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1807 		if (ifs == bif->bif_ifp)
1808 			break;
1809 
1810 	if (bif == NULL)
1811 		return (ENOENT);
1812 
1813 	bridge_delete_span(sc, bif);
1814 
1815 	return (0);
1816 }
1817 
1818 static int
1819 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1820 {
1821 	struct ifbropreq *req = arg;
1822 	struct bstp_state *bs = &sc->sc_stp;
1823 	struct bstp_port *root_port;
1824 
1825 	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1826 	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1827 	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1828 
1829 	root_port = bs->bs_root_port;
1830 	if (root_port == NULL)
1831 		req->ifbop_root_port = 0;
1832 	else
1833 		req->ifbop_root_port = root_port->bp_ifp->if_index;
1834 
1835 	req->ifbop_holdcount = bs->bs_txholdcount;
1836 	req->ifbop_priority = bs->bs_bridge_priority;
1837 	req->ifbop_protocol = bs->bs_protover;
1838 	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1839 	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1840 	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1841 	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1842 	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1843 	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1844 
1845 	return (0);
1846 }
1847 
1848 static int
1849 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1850 {
1851 	struct ifbrparam *param = arg;
1852 
1853 	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1854 	return (0);
1855 }
1856 
1857 static int
1858 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1859 {
1860 	struct ifbpstpconf *bifstp = arg;
1861 	struct bridge_iflist *bif;
1862 	struct bstp_port *bp;
1863 	struct ifbpstpreq bpreq;
1864 	char *buf, *outbuf;
1865 	int count, buflen, len, error = 0;
1866 
1867 	count = 0;
1868 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1869 		if ((bif->bif_flags & IFBIF_STP) != 0)
1870 			count++;
1871 	}
1872 
1873 	buflen = sizeof(bpreq) * count;
1874 	if (bifstp->ifbpstp_len == 0) {
1875 		bifstp->ifbpstp_len = buflen;
1876 		return (0);
1877 	}
1878 
1879 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1880 	if (outbuf == NULL)
1881 		return (ENOMEM);
1882 
1883 	count = 0;
1884 	buf = outbuf;
1885 	len = min(bifstp->ifbpstp_len, buflen);
1886 	bzero(&bpreq, sizeof(bpreq));
1887 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1888 		if (len < sizeof(bpreq))
1889 			break;
1890 
1891 		if ((bif->bif_flags & IFBIF_STP) == 0)
1892 			continue;
1893 
1894 		bp = &bif->bif_stp;
1895 		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1896 		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1897 		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1898 		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1899 		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1900 		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1901 
1902 		memcpy(buf, &bpreq, sizeof(bpreq));
1903 		count++;
1904 		buf += sizeof(bpreq);
1905 		len -= sizeof(bpreq);
1906 	}
1907 
1908 	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1909 	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1910 	free(outbuf, M_TEMP);
1911 	return (error);
1912 }
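
/*
 * BRDGGIFSSTP uses the usual two-pass sizing protocol: a first call with
 * ifbpstp_len == 0 only reports the space needed, a second call with an
 * allocated buffer fetches the records.  A rough userland sketch, for
 * illustration only (the SIOCGDRVSPEC/ifdrv plumbing is what ifconfig(8)
 * normally uses; s is an open socket and error handling is omitted):
 *
 *	struct ifbpstpconf conf = { 0 };
 *	struct ifdrv ifd = { .ifd_cmd = BRDGGIFSSTP,
 *	    .ifd_len = sizeof(conf), .ifd_data = &conf };
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ioctl(s, SIOCGDRVSPEC, &ifd);		(1st pass: learn size)
 *	conf.ifbpstp_req = calloc(1, conf.ifbpstp_len);
 *	ioctl(s, SIOCGDRVSPEC, &ifd);		(2nd pass: fetch records)
 */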
1913 
1914 static int
1915 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
1916 {
1917 	struct ifbrparam *param = arg;
1918 
1919 	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
1920 }
1921 
1922 static int
1923 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
1924 {
1925 	struct ifbrparam *param = arg;
1926 
1927 	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
1928 }
1929 
1930 /*
1931  * bridge_ifdetach:
1932  *
1933  *	Detach an interface from a bridge.  Called when a member
1934  *	interface is detaching.
1935  */
1936 static void
1937 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1938 {
1939 	struct bridge_softc *sc = ifp->if_bridge;
1940 	struct bridge_iflist *bif;
1941 
1942 	if (ifp->if_flags & IFF_RENAMING)
1943 		return;
1944 	if (V_bridge_cloner == NULL) {
1945 		/*
1946 		 * This detach handler can be called after
1947 		 * vnet_bridge_uninit().  Just return in that case.
1948 		 */
1949 		return;
1950 	}
1951 	/* Check if the interface is a bridge member */
1952 	if (sc != NULL) {
1953 		BRIDGE_LOCK(sc);
1954 
1955 		bif = bridge_lookup_member_if(sc, ifp);
1956 		if (bif != NULL)
1957 			bridge_delete_member(sc, bif, 1);
1958 
1959 		BRIDGE_UNLOCK(sc);
1960 		return;
1961 	}
1962 
1963 	/* Check if the interface is a span port */
1964 	BRIDGE_LIST_LOCK();
1965 	LIST_FOREACH(sc, &V_bridge_list, sc_list) {
1966 		BRIDGE_LOCK(sc);
1967 		CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1968 			if (ifp == bif->bif_ifp) {
1969 				bridge_delete_span(sc, bif);
1970 				break;
1971 			}
1972 
1973 		BRIDGE_UNLOCK(sc);
1974 	}
1975 	BRIDGE_LIST_UNLOCK();
1976 }
1977 
1978 /*
1979  * bridge_init:
1980  *
1981  *	Initialize a bridge interface.
1982  */
1983 static void
1984 bridge_init(void *xsc)
1985 {
1986 	struct bridge_softc *sc = (struct bridge_softc *)xsc;
1987 	struct ifnet *ifp = sc->sc_ifp;
1988 
1989 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1990 		return;
1991 
1992 	BRIDGE_LOCK(sc);
1993 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1994 	    bridge_timer, sc);
1995 
1996 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1997 	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
1998 
1999 	BRIDGE_UNLOCK(sc);
2000 }
2001 
2002 /*
2003  * bridge_stop:
2004  *
2005  *	Stop the bridge interface.
2006  */
2007 static void
2008 bridge_stop(struct ifnet *ifp, int disable)
2009 {
2010 	struct bridge_softc *sc = ifp->if_softc;
2011 
2012 	BRIDGE_LOCK_ASSERT(sc);
2013 
2014 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2015 		return;
2016 
2017 	BRIDGE_RT_LOCK(sc);
2018 	callout_stop(&sc->sc_brcallout);
2019 
2020 	bstp_stop(&sc->sc_stp);
2021 
2022 	bridge_rtflush(sc, IFBF_FLUSHDYN);
2023 	BRIDGE_RT_UNLOCK(sc);
2024 
2025 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2026 }
2027 
2028 /*
2029  * bridge_enqueue:
2030  *
2031  *	Enqueue a packet on a bridge member interface.
2032  *
2033  */
2034 static int
2035 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
2036 {
2037 	int len, err = 0;
2038 	short mflags;
2039 	struct mbuf *m0;
2040 
2041 	/* We may be sending a fragment so traverse the mbuf */
2042 	for (; m; m = m0) {
2043 		m0 = m->m_nextpkt;
2044 		m->m_nextpkt = NULL;
2045 		len = m->m_pkthdr.len;
2046 		mflags = m->m_flags;
2047 
2048 		/*
2049 		 * If the underlying interface cannot do VLAN tag insertion
2050 		 * itself, attach a packet tag that holds it.
2051 		 */
2052 		if ((m->m_flags & M_VLANTAG) &&
2053 		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
2054 			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2055 			if (m == NULL) {
2056 				if_printf(dst_ifp,
2057 				    "unable to prepend VLAN header\n");
2058 				if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
2059 				continue;
2060 			}
2061 			m->m_flags &= ~M_VLANTAG;
2062 		}
2063 
2064 		M_ASSERTPKTHDR(m); /* We shouldn't transmit an mbuf without a pkthdr */
2065 		if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
2066 			int n;
2067 
2068 			for (m = m0, n = 1; m != NULL; m = m0, n++) {
2069 				m0 = m->m_nextpkt;
2070 				m_freem(m);
2071 			}
2072 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, n);
2073 			break;
2074 		}
2075 
2076 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
2077 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
2078 		if (mflags & M_MCAST)
2079 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
2080 	}
2081 
2082 	return (err);
2083 }
2084 
2085 /*
2086  * bridge_dummynet:
2087  *
2088  * 	Receive a queued packet from dummynet and pass it on to the output
2089  * 	interface.
2090  *
2091  *	The mbuf has the Ethernet header already attached.
2092  */
2093 static void
2094 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2095 {
2096 	struct bridge_softc *sc;
2097 
2098 	sc = ifp->if_bridge;
2099 
2100 	/*
2101 	 * The packet didn't originate from a member interface. This should only
2102 	 * ever happen if a member interface is removed while packets are
2103 	 * queued for it.
2104 	 */
2105 	if (sc == NULL) {
2106 		m_freem(m);
2107 		return;
2108 	}
2109 
2110 	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2111 #ifdef INET6
2112 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2113 #endif
2114 	    ) {
2115 		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2116 			return;
2117 		if (m == NULL)
2118 			return;
2119 	}
2120 
2121 	bridge_enqueue(sc, ifp, m);
2122 }
2123 
2124 /*
2125  * bridge_output:
2126  *
2127  *	Send output from a bridge member interface.  This
2128  *	performs the bridging function for locally originated
2129  *	packets.
2130  *
2131  *	The mbuf has the Ethernet header already attached.  We must
2132  *	enqueue or free the mbuf before returning.
2133  */
2134 static int
2135 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2136     struct rtentry *rt)
2137 {
2138 	struct ether_header *eh;
2139 	struct ifnet *bifp, *dst_if;
2140 	struct bridge_softc *sc;
2141 	uint16_t vlan;
2142 
2143 	NET_EPOCH_ASSERT();
2144 
2145 	if (m->m_len < ETHER_HDR_LEN) {
2146 		m = m_pullup(m, ETHER_HDR_LEN);
2147 		if (m == NULL)
2148 			return (0);
2149 	}
2150 
2151 	eh = mtod(m, struct ether_header *);
2152 	sc = ifp->if_bridge;
2153 	vlan = VLANTAGOF(m);
2154 
2155 	bifp = sc->sc_ifp;
2156 
2157 	/*
2158 	 * If bridge is down, but the original output interface is up,
2159 	 * go ahead and send out that interface.  Otherwise, the packet
2160 	 * is dropped below.
2161 	 */
2162 	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2163 		dst_if = ifp;
2164 		goto sendunicast;
2165 	}
2166 
2167 	/*
2168 	 * If the packet is a multicast, or we don't know a better way to
2169 	 * get there, send to all interfaces.
2170 	 */
2171 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
2172 		dst_if = NULL;
2173 	else
2174 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2175 	/* Tap any traffic not passing back out the originating interface */
2176 	if (dst_if != ifp)
2177 		ETHER_BPF_MTAP(bifp, m);
2178 	if (dst_if == NULL) {
2179 		struct bridge_iflist *bif;
2180 		struct mbuf *mc;
2181 		int used = 0;
2182 
2183 		bridge_span(sc, m);
2184 
2185 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2186 			dst_if = bif->bif_ifp;
2187 
2188 			if (dst_if->if_type == IFT_GIF)
2189 				continue;
2190 			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2191 				continue;
2192 
2193 			/*
2194 			 * If this is not the original output interface,
2195 			 * and the interface is participating in spanning
2196 			 * tree, make sure the port is in a state that
2197 			 * allows forwarding.
2198 			 */
2199 			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2200 			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2201 				continue;
2202 
2203 			if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2204 				used = 1;
2205 				mc = m;
2206 			} else {
2207 				mc = m_dup(m, M_NOWAIT);
2208 				if (mc == NULL) {
2209 					if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2210 					continue;
2211 				}
2212 			}
2213 
2214 			bridge_enqueue(sc, dst_if, mc);
2215 		}
2216 		if (used == 0)
2217 			m_freem(m);
2218 		return (0);
2219 	}
2220 
2221 sendunicast:
2222 	/*
2223 	 * XXX Spanning tree consideration here?
2224 	 */
2225 
2226 	bridge_span(sc, m);
2227 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2228 		m_freem(m);
2229 		return (0);
2230 	}
2231 
2232 	bridge_enqueue(sc, dst_if, m);
2233 	return (0);
2234 }
2235 
2236 /*
2237  * bridge_transmit:
2238  *
2239  *	Do output on a bridge.
2240  *
2241  */
2242 static int
2243 bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2244 {
2245 	struct bridge_softc *sc;
2246 	struct ether_header *eh;
2247 	struct ifnet *dst_if;
2248 	int error = 0;
2249 
2250 	sc = ifp->if_softc;
2251 
2252 	ETHER_BPF_MTAP(ifp, m);
2253 
2254 	eh = mtod(m, struct ether_header *);
2255 
2256 	if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2257 	    (dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1)) != NULL) {
2258 		error = bridge_enqueue(sc, dst_if, m);
2259 	} else
2260 		bridge_broadcast(sc, ifp, m, 0);
2261 
2262 	return (error);
2263 }
2264 
2265 #ifdef ALTQ
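/*
 * ALTQ support: when queueing is enabled on the bridge interface,
 * bridge_altq_transmit() places frames on if_snd and drains the queue
 * through bridge_altq_start(); otherwise frames go straight to
 * bridge_transmit().
 */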
2266 static void
2267 bridge_altq_start(if_t ifp)
2268 {
2269 	struct ifaltq *ifq = &ifp->if_snd;
2270 	struct mbuf *m;
2271 
2272 	IFQ_LOCK(ifq);
2273 	IFQ_DEQUEUE_NOLOCK(ifq, m);
2274 	while (m != NULL) {
2275 		bridge_transmit(ifp, m);
2276 		IFQ_DEQUEUE_NOLOCK(ifq, m);
2277 	}
2278 	IFQ_UNLOCK(ifq);
2279 }
2280 
2281 static int
2282 bridge_altq_transmit(if_t ifp, struct mbuf *m)
2283 {
2284 	int err;
2285 
2286 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
2287 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
2288 		if (err == 0)
2289 			bridge_altq_start(ifp);
2290 	} else
2291 		err = bridge_transmit(ifp, m);
2292 
2293 	return (err);
2294 }
2295 #endif	/* ALTQ */
2296 
2297 /*
2298  * The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2299  */
2300 static void
2301 bridge_qflush(struct ifnet *ifp __unused)
2302 {
2303 }
2304 
2305 /*
2306  * bridge_forward:
2307  *
2308  *	The forwarding function of the bridge.
2309  *
2310  *	NOTE: Expects to be called within a network epoch section.
2311  */
2312 static void
2313 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2314     struct mbuf *m)
2315 {
2316 	struct bridge_iflist *dbif;
2317 	struct ifnet *src_if, *dst_if, *ifp;
2318 	struct ether_header *eh;
2319 	uint16_t vlan;
2320 	uint8_t *dst;
2321 	int error;
2322 
2323 	NET_EPOCH_ASSERT();
2324 
2325 	src_if = m->m_pkthdr.rcvif;
2326 	ifp = sc->sc_ifp;
2327 
2328 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2329 	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2330 	vlan = VLANTAGOF(m);
2331 
2332 	if ((sbif->bif_flags & IFBIF_STP) &&
2333 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2334 		goto drop;
2335 
2336 	eh = mtod(m, struct ether_header *);
2337 	dst = eh->ether_dhost;
2338 
2339 	/* If the interface is learning, record the address. */
2340 	if (sbif->bif_flags & IFBIF_LEARNING) {
2341 		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2342 		    sbif, 0, IFBAF_DYNAMIC);
2343 		/*
2344 		 * If the interface has an address limit then deny any source
2345 		 * that is not in the cache.
2346 		 */
2347 		if (error && sbif->bif_addrmax)
2348 			goto drop;
2349 	}
2350 
2351 	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2352 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2353 		goto drop;
2354 
2355 	/*
2356 	 * At this point, the port either doesn't participate
2357 	 * in spanning tree or it is in the forwarding state.
2358 	 */
2359 
2360 	/*
2361 	 * If the packet is unicast, destined for someone on
2362 	 * "this" side of the bridge, drop it.
2363 	 */
2364 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2365 		dst_if = bridge_rtlookup(sc, dst, vlan);
2366 		if (src_if == dst_if)
2367 			goto drop;
2368 	} else {
2369 		/*
2370 		 * Check if it's a reserved multicast address; any address
2371 		 * listed in 802.1D section 7.12.6 may not be forwarded by
2372 		 * the bridge.
2373 		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F.
2374 		 */
2375 		if (dst[0] == 0x01 && dst[1] == 0x80 &&
2376 		    dst[2] == 0xc2 && dst[3] == 0x00 &&
2377 		    dst[4] == 0x00 && dst[5] <= 0x0f)
2378 			goto drop;
2379 
2380 		/* ...forward it to all interfaces. */
2381 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2382 		dst_if = NULL;
2383 	}
2384 
2385 	/*
2386 	 * If we have a destination interface which is a member of our bridge,
2387 	 * OR this is a unicast packet, push it through the bpf(4) machinery.
2388 	 * For broadcast or multicast packets, don't bother because it will
2389 	 * be reinjected into ether_input. We do this before we pass the packets
2390 	 * through the pfil(9) framework, as it is possible that pfil(9) will
2391 	 * drop the packet, or possibly modify it, making it difficult to debug
2392 	 * firewall issues on the bridge.
2393 	 */
2394 	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2395 		ETHER_BPF_MTAP(ifp, m);
2396 
2397 	/* run the packet filter */
2398 	if (PFIL_HOOKED_IN(V_inet_pfil_head)
2399 #ifdef INET6
2400 	    || PFIL_HOOKED_IN(V_inet6_pfil_head)
2401 #endif
2402 	    ) {
2403 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2404 			return;
2405 		if (m == NULL)
2406 			return;
2407 	}
2408 
2409 	if (dst_if == NULL) {
2410 		bridge_broadcast(sc, src_if, m, 1);
2411 		return;
2412 	}
2413 
2414 	/*
2415 	 * At this point, we're dealing with a unicast frame
2416 	 * going to a different interface.
2417 	 */
2418 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2419 		goto drop;
2420 
2421 	dbif = bridge_lookup_member_if(sc, dst_if);
2422 	if (dbif == NULL)
2423 		/* Not a member of the bridge (anymore?) */
2424 		goto drop;
2425 
2426 	/* Private segments can not talk to each other */
2427 	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2428 		goto drop;
2429 
2430 	if ((dbif->bif_flags & IFBIF_STP) &&
2431 	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2432 		goto drop;
2433 
2434 	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2435 #ifdef INET6
2436 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2437 #endif
2438 	    ) {
2439 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2440 			return;
2441 		if (m == NULL)
2442 			return;
2443 	}
2444 
2445 	bridge_enqueue(sc, dst_if, m);
2446 	return;
2447 
2448 drop:
2449 	m_freem(m);
2450 }
2451 
2452 /*
2453  * bridge_input:
2454  *
2455  *	Receive input from a member interface.  Queue the packet for
2456  *	bridging if it is not for us.
2457  */
2458 static struct mbuf *
2459 bridge_input(struct ifnet *ifp, struct mbuf *m)
2460 {
2461 	struct bridge_softc *sc = ifp->if_bridge;
2462 	struct bridge_iflist *bif, *bif2;
2463 	struct ifnet *bifp;
2464 	struct ether_header *eh;
2465 	struct mbuf *mc, *mc2;
2466 	uint16_t vlan;
2467 	int error;
2468 
2469 	NET_EPOCH_ASSERT();
2470 
2471 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2472 		return (m);
2473 
2474 	bifp = sc->sc_ifp;
2475 	vlan = VLANTAGOF(m);
2476 
2477 	/*
2478 	 * Implement support for bridge monitoring. If this flag has been
2479 	 * set on this interface, discard the packet once we push it through
2480 	 * the bpf(4) machinery, but before we do, increment the byte and
2481 	 * packet counters associated with this interface.
2482 	 */
2483 	if ((bifp->if_flags & IFF_MONITOR) != 0) {
2484 		m->m_pkthdr.rcvif  = bifp;
2485 		ETHER_BPF_MTAP(bifp, m);
2486 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2487 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2488 		m_freem(m);
2489 		return (NULL);
2490 	}
2491 	bif = bridge_lookup_member_if(sc, ifp);
2492 	if (bif == NULL) {
2493 		return (m);
2494 	}
2495 
2496 	eh = mtod(m, struct ether_header *);
2497 
2498 	bridge_span(sc, m);
2499 
2500 	if (m->m_flags & (M_BCAST|M_MCAST)) {
2501 		/* Tap off 802.1D packets; they do not get forwarded. */
2502 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2503 		    ETHER_ADDR_LEN) == 0) {
2504 			bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2505 			return (NULL);
2506 		}
2507 
2508 		if ((bif->bif_flags & IFBIF_STP) &&
2509 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2510 			return (m);
2511 		}
2512 
2513 		/*
2514 		 * Make a deep copy of the packet and enqueue the copy
2515 		 * for bridge processing; return the original packet for
2516 		 * local processing.
2517 		 */
2518 		mc = m_dup(m, M_NOWAIT);
2519 		if (mc == NULL) {
2520 			return (m);
2521 		}
2522 
2523 		/* Perform the bridge forwarding function with the copy. */
2524 		bridge_forward(sc, bif, mc);
2525 
2526 		/*
2527 		 * Reinject the mbuf as arriving on the bridge so we have a
2528 		 * chance at claiming multicast packets. We can not loop back
2529 		 * here from ether_input as a bridge is never a member of a
2530 		 * bridge.
2531 		 */
2532 		KASSERT(bifp->if_bridge == NULL,
2533 		    ("loop created in bridge_input"));
2534 		mc2 = m_dup(m, M_NOWAIT);
2535 		if (mc2 != NULL) {
2536 			/* Keep the layer3 header aligned */
2537 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2538 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2539 		}
2540 		if (mc2 != NULL) {
2541 			mc2->m_pkthdr.rcvif = bifp;
2542 			(*bifp->if_input)(bifp, mc2);
2543 		}
2544 
2545 		/* Return the original packet for local processing. */
2546 		return (m);
2547 	}
2548 
2549 	if ((bif->bif_flags & IFBIF_STP) &&
2550 	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2551 		return (m);
2552 	}
2553 
2554 #if (defined(INET) || defined(INET6))
2555 #   define OR_CARP_CHECK_WE_ARE_DST(iface) \
2556 	|| ((iface)->if_carp \
2557 	    && (*carp_forus_p)((iface), eh->ether_dhost))
2558 #   define OR_CARP_CHECK_WE_ARE_SRC(iface) \
2559 	|| ((iface)->if_carp \
2560 	    && (*carp_forus_p)((iface), eh->ether_shost))
2561 #else
2562 #   define OR_CARP_CHECK_WE_ARE_DST(iface)
2563 #   define OR_CARP_CHECK_WE_ARE_SRC(iface)
2564 #endif
2565 
2566 #ifdef INET6
2567 #   define OR_PFIL_HOOKED_INET6 \
2568 	|| PFIL_HOOKED_IN(V_inet6_pfil_head)
2569 #else
2570 #   define OR_PFIL_HOOKED_INET6
2571 #endif
2572 
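/*
 * GRAB_OUR_PACKETS(iface) decides whether the frame terminates locally on
 * the given interface: frames addressed to iface's MAC (or a CARP address
 * it answers for) are handed to the stack, optionally after filtering on
 * the physical interface, while frames that carry iface's own MAC as the
 * source are ones we sent out ourselves and are dropped.
 */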
2573 #define GRAB_OUR_PACKETS(iface) \
2574 	if ((iface)->if_type == IFT_GIF) \
2575 		continue; \
2576 	/* It is destined for us. */ \
2577 	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost,  ETHER_ADDR_LEN) == 0 \
2578 	    OR_CARP_CHECK_WE_ARE_DST((iface))				\
2579 	    ) {								\
2580 		if (bif->bif_flags & IFBIF_LEARNING) {			\
2581 			error = bridge_rtupdate(sc, eh->ether_shost,	\
2582 			    vlan, bif, 0, IFBAF_DYNAMIC);		\
2583 			if (error && bif->bif_addrmax) {		\
2584 				m_freem(m);				\
2585 				return (NULL);				\
2586 			}						\
2587 		}							\
2588 		m->m_pkthdr.rcvif = iface;				\
2589 		if ((iface) == ifp) {					\
2590 			/* Skip bridge processing... src == dest */	\
2591 			return (m);					\
2592 		}							\
2593 		/* It's passing over or to the bridge, locally. */	\
2594 		ETHER_BPF_MTAP(bifp, m);				\
2595 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);		\
2596 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); \
2597 		/* Filter on the physical interface. */			\
2598 		if (V_pfil_local_phys && (PFIL_HOOKED_IN(V_inet_pfil_head) \
2599 		     OR_PFIL_HOOKED_INET6)) {				\
2600 			if (bridge_pfil(&m, NULL, ifp,			\
2601 			    PFIL_IN) != 0 || m == NULL) {		\
2602 				return (NULL);				\
2603 			}						\
2604 		}							\
2605 		if ((iface) != bifp)					\
2606 			ETHER_BPF_MTAP(iface, m);			\
2607 		return (m);						\
2608 	}								\
2609 									\
2610 	/* We just received a packet that we sent out. */		\
2611 	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
2612 	    OR_CARP_CHECK_WE_ARE_SRC((iface))			\
2613 	    ) {								\
2614 		m_freem(m);						\
2615 		return (NULL);						\
2616 	}
2617 
2618 	/*
2619 	 * Unicast.  Make sure it's not for the bridge.
2620 	 */
2621 	do { GRAB_OUR_PACKETS(bifp) } while (0);
2622 
2623 	/*
2624 	 * Check ifp first, giving it priority.  This helps when the packet
2625 	 * comes in through an interface such as a VLAN that shares the same
2626 	 * MAC address with several interfaces on the same bridge.  It also
2627 	 * saves some CPU cycles when the destination interface and the
2628 	 * input interface (i.e. ifp) are the same.
2629 	 */
2630 	do { GRAB_OUR_PACKETS(ifp) } while (0);
2631 
2632 	/* Now check all the bridge members. */
2633 	CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2634 		GRAB_OUR_PACKETS(bif2->bif_ifp)
2635 	}
2636 
2637 #undef OR_CARP_CHECK_WE_ARE_DST
2638 #undef OR_CARP_CHECK_WE_ARE_SRC
2639 #undef OR_PFIL_HOOKED_INET6
2640 #undef GRAB_OUR_PACKETS
2641 
2642 	/* Perform the bridge forwarding function. */
2643 	bridge_forward(sc, bif, m);
2644 
2645 	return (NULL);
2646 }
2647 
2648 /*
2649  * bridge_broadcast:
2650  *
2651  *	Send a frame to all interfaces that are members of
2652  *	the bridge, except for the one on which the packet
2653  *	arrived.
2654  *
2655  *	NOTE: Expects to be called within a network epoch section.
2656  */
2657 static void
2658 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2659     struct mbuf *m, int runfilt)
2660 {
2661 	struct bridge_iflist *dbif, *sbif;
2662 	struct mbuf *mc;
2663 	struct ifnet *dst_if;
2664 	int used = 0, i;
2665 
2666 	NET_EPOCH_ASSERT();
2667 
2668 	sbif = bridge_lookup_member_if(sc, src_if);
2669 
2670 	/* Filter on the bridge interface before broadcasting */
2671 	if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2672 #ifdef INET6
2673 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2674 #endif
2675 	    )) {
2676 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2677 			return;
2678 		if (m == NULL)
2679 			return;
2680 	}
2681 
2682 	CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2683 		dst_if = dbif->bif_ifp;
2684 		if (dst_if == src_if)
2685 			continue;
2686 
2687 		/* Private segments can not talk to each other */
2688 		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2689 			continue;
2690 
2691 		if ((dbif->bif_flags & IFBIF_STP) &&
2692 		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2693 			continue;
2694 
2695 		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2696 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2697 			continue;
2698 
2699 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2700 			continue;
2701 
2702 		if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
2703 			mc = m;
2704 			used = 1;
2705 		} else {
2706 			mc = m_dup(m, M_NOWAIT);
2707 			if (mc == NULL) {
2708 				if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2709 				continue;
2710 			}
2711 		}
2712 
2713 		/*
2714 		 * Filter on the output interface. Pass a NULL bridge interface
2715 		 * pointer so we do not redundantly filter on the bridge for
2716 		 * each interface we broadcast on.
2717 		 */
2718 		if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2719 #ifdef INET6
2720 		    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2721 #endif
2722 		    )) {
2723 			if (used == 0) {
2724 				/* Keep the layer3 header aligned */
2725 				i = min(mc->m_pkthdr.len, max_protohdr);
2726 				mc = m_copyup(mc, i, ETHER_ALIGN);
2727 				if (mc == NULL) {
2728 					if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2729 					continue;
2730 				}
2731 			}
2732 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2733 				continue;
2734 			if (mc == NULL)
2735 				continue;
2736 		}
2737 
2738 		bridge_enqueue(sc, dst_if, mc);
2739 	}
2740 	if (used == 0)
2741 		m_freem(m);
2742 }
2743 
2744 /*
2745  * bridge_span:
2746  *
2747  *	Duplicate a packet out one or more interfaces that are in span
2748  *	mode; the original mbuf is left unmodified.
2749  */
2750 static void
2751 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2752 {
2753 	struct bridge_iflist *bif;
2754 	struct ifnet *dst_if;
2755 	struct mbuf *mc;
2756 
2757 	NET_EPOCH_ASSERT();
2758 
2759 	if (CK_LIST_EMPTY(&sc->sc_spanlist))
2760 		return;
2761 
2762 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2763 		dst_if = bif->bif_ifp;
2764 
2765 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2766 			continue;
2767 
2768 		mc = m_dup(m, M_NOWAIT);
2769 		if (mc == NULL) {
2770 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2771 			continue;
2772 		}
2773 
2774 		bridge_enqueue(sc, dst_if, mc);
2775 	}
2776 }
2777 
2778 /*
2779  * bridge_rtupdate:
2780  *
2781  *	Add a bridge routing entry.
2782  */
2783 static int
2784 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
2785     struct bridge_iflist *bif, int setflags, uint8_t flags)
2786 {
2787 	struct bridge_rtnode *brt;
2788 	int error;
2789 
2790 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
2791 
2792 	/* Check the source address is valid and not multicast. */
2793 	if (ETHER_IS_MULTICAST(dst) ||
2794 	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
2795 	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
2796 		return (EINVAL);
2797 
2798 	/* 802.1p frames map to vlan 1 */
2799 	if (vlan == 0)
2800 		vlan = 1;
2801 
2802 	/*
2803 	 * A route for this destination might already exist.  If so,
2804 	 * update it, otherwise create a new one.
2805 	 */
2806 	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2807 		BRIDGE_RT_LOCK(sc);
2808 
2809 		/* Check again, now that we have the lock. There could have
2810 		 * been a race and we only want to insert this once. */
2811 		if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) != NULL) {
2812 			BRIDGE_RT_UNLOCK(sc);
2813 			return (0);
2814 		}
2815 
2816 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2817 			sc->sc_brtexceeded++;
2818 			BRIDGE_RT_UNLOCK(sc);
2819 			return (ENOSPC);
2820 		}
2821 		/* Check per interface address limits (if enabled) */
2822 		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2823 			bif->bif_addrexceeded++;
2824 			BRIDGE_RT_UNLOCK(sc);
2825 			return (ENOSPC);
2826 		}
2827 
2828 		/*
2829 		 * Allocate a new bridge forwarding node, and
2830 		 * initialize the expiration time and Ethernet
2831 		 * address.
2832 		 */
2833 		brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2834 		if (brt == NULL) {
2835 			BRIDGE_RT_UNLOCK(sc);
2836 			return (ENOMEM);
2837 		}
2838 		brt->brt_vnet = curvnet;
2839 
2840 		if (bif->bif_flags & IFBIF_STICKY)
2841 			brt->brt_flags = IFBAF_STICKY;
2842 		else
2843 			brt->brt_flags = IFBAF_DYNAMIC;
2844 
2845 		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2846 		brt->brt_vlan = vlan;
2847 
2848 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2849 			uma_zfree(V_bridge_rtnode_zone, brt);
2850 			BRIDGE_RT_UNLOCK(sc);
2851 			return (error);
2852 		}
2853 		brt->brt_dst = bif;
2854 		bif->bif_addrcnt++;
2855 
2856 		BRIDGE_RT_UNLOCK(sc);
2857 	}
2858 
2859 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2860 	    brt->brt_dst != bif) {
2861 		BRIDGE_RT_LOCK(sc);
2862 		brt->brt_dst->bif_addrcnt--;
2863 		brt->brt_dst = bif;
2864 		brt->brt_dst->bif_addrcnt++;
2865 		BRIDGE_RT_UNLOCK(sc);
2866 	}
2867 
2868 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2869 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
2870 	if (setflags)
2871 		brt->brt_flags = flags;
2872 
2873 	return (0);
2874 }
2875 
2876 /*
2877  * bridge_rtlookup:
2878  *
2879  *	Lookup the destination interface for an address.
2880  */
2881 static struct ifnet *
2882 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2883 {
2884 	struct bridge_rtnode *brt;
2885 
2886 	NET_EPOCH_ASSERT();
2887 
2888 	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
2889 		return (NULL);
2890 
2891 	return (brt->brt_ifp);
2892 }
2893 
2894 /*
2895  * bridge_rttrim:
2896  *
2897  *	Trim the routine table so that we have a number
2898  *	Trim the routing table so that the number of
2899  *	entries is less than or equal to the configured
2900  *	maximum.
2901 static void
2902 bridge_rttrim(struct bridge_softc *sc)
2903 {
2904 	struct bridge_rtnode *brt, *nbrt;
2905 
2906 	NET_EPOCH_ASSERT();
2907 	BRIDGE_RT_LOCK_ASSERT(sc);
2908 
2909 	/* Make sure we actually need to do this. */
2910 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2911 		return;
2912 
2913 	/* Force an aging cycle; this might trim enough addresses. */
2914 	bridge_rtage(sc);
2915 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2916 		return;
2917 
2918 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2919 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2920 			bridge_rtnode_destroy(sc, brt);
2921 			if (sc->sc_brtcnt <= sc->sc_brtmax)
2922 				return;
2923 		}
2924 	}
2925 }
2926 
2927 /*
2928  * bridge_timer:
2929  *
2930  *	Aging timer for the bridge.
2931  */
2932 static void
2933 bridge_timer(void *arg)
2934 {
2935 	struct bridge_softc *sc = arg;
2936 
2937 	BRIDGE_RT_LOCK_ASSERT(sc);
2938 
2939 	/* Destruction of rtnodes requires a proper vnet context */
2940 	CURVNET_SET(sc->sc_ifp->if_vnet);
2941 	bridge_rtage(sc);
2942 
2943 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
2944 		callout_reset(&sc->sc_brcallout,
2945 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2946 	CURVNET_RESTORE();
2947 }
2948 
2949 /*
2950  * bridge_rtage:
2951  *
2952  *	Perform an aging cycle.
2953  */
2954 static void
2955 bridge_rtage(struct bridge_softc *sc)
2956 {
2957 	struct bridge_rtnode *brt, *nbrt;
2958 
2959 	BRIDGE_RT_LOCK_ASSERT(sc);
2960 
2961 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2962 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2963 			if (time_uptime >= brt->brt_expire)
2964 				bridge_rtnode_destroy(sc, brt);
2965 		}
2966 	}
2967 }
2968 
2969 /*
2970  * bridge_rtflush:
2971  *
2972  *	Remove all dynamic addresses from the bridge.
2973  */
2974 static void
2975 bridge_rtflush(struct bridge_softc *sc, int full)
2976 {
2977 	struct bridge_rtnode *brt, *nbrt;
2978 
2979 	BRIDGE_RT_LOCK_ASSERT(sc);
2980 
2981 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2982 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2983 			bridge_rtnode_destroy(sc, brt);
2984 	}
2985 }
2986 
2987 /*
2988  * bridge_rtdaddr:
2989  *
2990  *	Remove an address from the table.
2991  */
2992 static int
2993 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2994 {
2995 	struct bridge_rtnode *brt;
2996 	int found = 0;
2997 
2998 	BRIDGE_RT_LOCK(sc);
2999 
3000 	/*
3001 	 * If vlan is zero then we want to delete for all vlans, so the
3002 	 * lookup may return more than one entry.
3003 	 */
3004 	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
3005 		bridge_rtnode_destroy(sc, brt);
3006 		found = 1;
3007 	}
3008 
3009 	BRIDGE_RT_UNLOCK(sc);
3010 
3011 	return (found ? 0 : ENOENT);
3012 }
3013 
3014 /*
3015  * bridge_rtdelete:
3016  *
3017  *	Delete routes to a speicifc member interface.
3018  *	Delete routes to a specific member interface.
3019 static void
3020 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
3021 {
3022 	struct bridge_rtnode *brt, *nbrt;
3023 
3024 	BRIDGE_RT_LOCK_ASSERT(sc);
3025 
3026 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3027 		if (brt->brt_ifp == ifp && (full ||
3028 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
3029 			bridge_rtnode_destroy(sc, brt);
3030 	}
3031 }
3032 
3033 /*
3034  * bridge_rtable_init:
3035  *
3036  *	Initialize the route table for this bridge.
3037  */
3038 static void
3039 bridge_rtable_init(struct bridge_softc *sc)
3040 {
3041 	int i;
3042 
3043 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
3044 	    M_DEVBUF, M_WAITOK);
3045 
3046 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
3047 		CK_LIST_INIT(&sc->sc_rthash[i]);
3048 
3049 	sc->sc_rthash_key = arc4random();
3050 	CK_LIST_INIT(&sc->sc_rtlist);
3051 }
3052 
3053 /*
3054  * bridge_rtable_fini:
3055  *
3056  *	Deconstruct the route table for this bridge.
3057  */
3058 static void
3059 bridge_rtable_fini(struct bridge_softc *sc)
3060 {
3061 
3062 	KASSERT(sc->sc_brtcnt == 0,
3063 	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
3064 	free(sc->sc_rthash, M_DEVBUF);
3065 }
3066 
3067 /*
3068  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3069  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3070  */
3071 #define	mix(a, b, c)							\
3072 do {									\
3073 	a -= b; a -= c; a ^= (c >> 13);					\
3074 	b -= c; b -= a; b ^= (a << 8);					\
3075 	c -= a; c -= b; c ^= (b >> 13);					\
3076 	a -= b; a -= c; a ^= (c >> 12);					\
3077 	b -= c; b -= a; b ^= (a << 16);					\
3078 	c -= a; c -= b; c ^= (b >> 5);					\
3079 	a -= b; a -= c; a ^= (c >> 3);					\
3080 	b -= c; b -= a; b ^= (a << 10);					\
3081 	c -= a; c -= b; c ^= (b >> 15);					\
3082 } while (/*CONSTCOND*/0)
3083 
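/*
 * Hash an Ethernet address into one of the BRIDGE_RTHASH_SIZE buckets of
 * sc_rthash.  The per-bridge random key (sc_rthash_key, seeded from
 * arc4random() in bridge_rtable_init()) makes bucket placement hard for a
 * remote sender to predict.
 */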
3084 static __inline uint32_t
3085 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3086 {
3087 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3088 
3089 	b += addr[5] << 8;
3090 	b += addr[4];
3091 	a += addr[3] << 24;
3092 	a += addr[2] << 16;
3093 	a += addr[1] << 8;
3094 	a += addr[0];
3095 
3096 	mix(a, b, c);
3097 
3098 	return (c & BRIDGE_RTHASH_MASK);
3099 }
3100 
3101 #undef mix
3102 
3103 static int
3104 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3105 {
3106 	int i, d;
3107 
3108 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3109 		d = ((int)a[i]) - ((int)b[i]);
3110 	}
3111 
3112 	return (d);
3113 }
3114 
3115 /*
3116  * bridge_rtnode_lookup:
3117  *
3118  *	Look up a bridge route node for the specified destination.  Compare
3119  *	the vlan id, or if it is zero, just return the first match.
3120  */
3121 static struct bridge_rtnode *
3122 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
3123 {
3124 	struct bridge_rtnode *brt;
3125 	uint32_t hash;
3126 	int dir;
3127 
3128 	BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);
3129 
3130 	hash = bridge_rthash(sc, addr);
3131 	CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
3132 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3133 		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
3134 			return (brt);
3135 		if (dir > 0)
3136 			return (NULL);
3137 	}
3138 
3139 	return (NULL);
3140 }
3141 
3142 /*
3143  * bridge_rtnode_insert:
3144  *
3145  *	Insert the specified bridge node into the route table.  We
3146  *	assume the entry is not already in the table.
3147  */
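/*
 * Each hash chain is kept sorted by Ethernet address, which is what lets
 * bridge_rtnode_lookup() give up as soon as it walks past the slot where
 * a matching entry would have to be.
 */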
3148 static int
3149 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3150 {
3151 	struct bridge_rtnode *lbrt;
3152 	uint32_t hash;
3153 	int dir;
3154 
3155 	BRIDGE_RT_LOCK_ASSERT(sc);
3156 
3157 	hash = bridge_rthash(sc, brt->brt_addr);
3158 
3159 	lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
3160 	if (lbrt == NULL) {
3161 		CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
3162 		goto out;
3163 	}
3164 
3165 	do {
3166 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3167 		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
3168 			return (EEXIST);
3169 		if (dir > 0) {
3170 			CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3171 			goto out;
3172 		}
3173 		if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
3174 			CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3175 			goto out;
3176 		}
3177 		lbrt = CK_LIST_NEXT(lbrt, brt_hash);
3178 	} while (lbrt != NULL);
3179 
3180 #ifdef DIAGNOSTIC
3181 	panic("bridge_rtnode_insert: impossible");
3182 #endif
3183 
3184 out:
3185 	CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3186 	sc->sc_brtcnt++;
3187 
3188 	return (0);
3189 }
3190 
3191 static void
3192 bridge_rtnode_destroy_cb(struct epoch_context *ctx)
3193 {
3194 	struct bridge_rtnode *brt;
3195 
3196 	brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
3197 
3198 	CURVNET_SET(brt->brt_vnet);
3199 	uma_zfree(V_bridge_rtnode_zone, brt);
3200 	CURVNET_RESTORE();
3201 }
3202 
3203 /*
3204  * bridge_rtnode_destroy:
3205  *
3206  *	Destroy a bridge rtnode.
3207  */
3208 static void
3209 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3210 {
3211 	BRIDGE_RT_LOCK_ASSERT(sc);
3212 
3213 	CK_LIST_REMOVE(brt, brt_hash);
3214 
3215 	CK_LIST_REMOVE(brt, brt_list);
3216 	sc->sc_brtcnt--;
3217 	brt->brt_dst->bif_addrcnt--;
3218 
3219 	NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
3220 }
3221 
3222 /*
3223  * bridge_rtable_expire:
3224  *
3225  *	Set the expiry time for all routes on an interface.
3226  */
3227 static void
3228 bridge_rtable_expire(struct ifnet *ifp, int age)
3229 {
3230 	struct bridge_softc *sc = ifp->if_bridge;
3231 	struct bridge_rtnode *brt;
3232 
3233 	CURVNET_SET(ifp->if_vnet);
3234 	BRIDGE_RT_LOCK(sc);
3235 
3236 	/*
3237 	 * If the age is zero then flush; otherwise cap the expiry time of
3238 	 * the interface's entries at 'age' seconds from now.
3239 	 */
3240 	if (age == 0)
3241 		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3242 	else {
3243 		CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3244 			/* Cap the expiry time to 'age' */
3245 			if (brt->brt_ifp == ifp &&
3246 			    brt->brt_expire > time_uptime + age &&
3247 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3248 				brt->brt_expire = time_uptime + age;
3249 		}
3250 	}
3251 	BRIDGE_RT_UNLOCK(sc);
3252 	CURVNET_RESTORE();
3253 }
3254 
3255 /*
3256  * bridge_state_change:
3257  *
3258  *	Callback from the bridgestp code when a port changes states.
3259  */
3260 static void
3261 bridge_state_change(struct ifnet *ifp, int state)
3262 {
3263 	struct bridge_softc *sc = ifp->if_bridge;
3264 	static const char *stpstates[] = {
3265 		"disabled",
3266 		"listening",
3267 		"learning",
3268 		"forwarding",
3269 		"blocking",
3270 		"discarding"
3271 	};
3272 
3273 	CURVNET_SET(ifp->if_vnet);
3274 	if (V_log_stp)
3275 		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
3276 		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
3277 	CURVNET_RESTORE();
3278 }
3279 
3280 /*
3281  * Send bridge packets through pfil if they are one of the types pfil can deal
3282  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3283  * question.)  If bifp or ifp is NULL then packet filtering is skipped for
3284  * that interface.
3285  */
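/*
 * Which of the hooks below actually run is controlled by the per-vnet
 * pfil_* knobs (V_pfil_bridge, V_pfil_member, V_pfil_onlyip, V_pfil_ipfw
 * and friends), normally exposed as net.link.bridge.* sysctls.
 */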
3286 static int
3287 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3288 {
3289 	int snap, error, i, hlen;
3290 	struct ether_header *eh1, eh2;
3291 	struct ip *ip;
3292 	struct llc llc1;
3293 	u_int16_t ether_type;
3294 	pfil_return_t rv;
3295 
3296 	snap = 0;
3297 	error = -1;	/* Default to an error unless explicitly set to 0 */
3298 
3299 #if 0
3300 	/* we may return with the IP fields swapped, ensure it's not shared */
3301 	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
3302 #endif
3303 
3304 	if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
3305 		return (0); /* filtering is disabled */
3306 
3307 	i = min((*mp)->m_pkthdr.len, max_protohdr);
3308 	if ((*mp)->m_len < i) {
3309 	    *mp = m_pullup(*mp, i);
3310 	    if (*mp == NULL) {
3311 		printf("%s: m_pullup failed\n", __func__);
3312 		return (-1);
3313 	    }
3314 	}
3315 
3316 	eh1 = mtod(*mp, struct ether_header *);
3317 	ether_type = ntohs(eh1->ether_type);
3318 
3319 	/*
3320 	 * Check for SNAP/LLC.
3321 	 */
3322 	if (ether_type < ETHERMTU) {
3323 		struct llc *llc2 = (struct llc *)(eh1 + 1);
3324 
3325 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3326 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
3327 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
3328 		    llc2->llc_control == LLC_UI) {
3329 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
3330 			snap = 1;
3331 		}
3332 	}
3333 
3334 	/*
3335 	 * If we're trying to filter bridge traffic, don't look at anything
3336 	 * other than IP and ARP traffic.  If the filter doesn't understand
3337 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
3338 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3339 	 * but of course we don't have an AppleTalk filter to begin with.
3340 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3341 	 * ARP traffic.)
3342 	 */
3343 	switch (ether_type) {
3344 		case ETHERTYPE_ARP:
3345 		case ETHERTYPE_REVARP:
3346 			if (V_pfil_ipfw_arp == 0)
3347 				return (0); /* Automatically pass */
3348 			break;
3349 
3350 		case ETHERTYPE_IP:
3351 #ifdef INET6
3352 		case ETHERTYPE_IPV6:
3353 #endif /* INET6 */
3354 			break;
3355 		default:
3356 			/*
3357 			 * Check to see if the user wants to pass non-IP
3358 			 * packets; these are not checked by pfil(9) and would
3359 			 * pass unconditionally, so the default is to drop.
3360 			 */
3361 			if (V_pfil_onlyip)
3362 				goto bad;
3363 	}
3364 
3365 	/* Run the packet through pfil before stripping link headers */
3366 	if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
3367 	    dir == PFIL_OUT && ifp != NULL) {
3368 		switch (pfil_mbuf_out(V_link_pfil_head, mp, ifp, NULL)) {
3369 		case PFIL_DROPPED:
3370 			return (EACCES);
3371 		case PFIL_CONSUMED:
3372 			return (0);
3373 		}
3374 	}
3375 
3376 	/* Strip off the Ethernet header and keep a copy. */
3377 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3378 	m_adj(*mp, ETHER_HDR_LEN);
3379 
3380 	/* Strip off snap header, if present */
3381 	if (snap) {
3382 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3383 		m_adj(*mp, sizeof(struct llc));
3384 	}
3385 
3386 	/*
3387 	 * Check the IP header for alignment and errors
3388 	 */
3389 	if (dir == PFIL_IN) {
3390 		switch (ether_type) {
3391 			case ETHERTYPE_IP:
3392 				error = bridge_ip_checkbasic(mp);
3393 				break;
3394 #ifdef INET6
3395 			case ETHERTYPE_IPV6:
3396 				error = bridge_ip6_checkbasic(mp);
3397 				break;
3398 #endif /* INET6 */
3399 			default:
3400 				error = 0;
3401 		}
3402 		if (error)
3403 			goto bad;
3404 	}
3405 
3406 	error = 0;
3407 
3408 	/*
3409 	 * Run the packet through pfil
3410 	 */
3411 	rv = PFIL_PASS;
3412 	switch (ether_type) {
3413 	case ETHERTYPE_IP:
3414 		/*
3415 		 * Run pfil on the member interface and the bridge, both can
3416 		 * be skipped by clearing pfil_member or pfil_bridge.
3417 		 *
3418 		 * Keep the order:
3419 		 *   in_if -> bridge_if -> out_if
3420 		 */
3421 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3422 		    pfil_mbuf_out(V_inet_pfil_head, mp, bifp, NULL)) !=
3423 		    PFIL_PASS)
3424 			break;
3425 
3426 		if (V_pfil_member && ifp != NULL) {
3427 			rv = (dir == PFIL_OUT) ?
3428 			    pfil_mbuf_out(V_inet_pfil_head, mp, ifp, NULL) :
3429 			    pfil_mbuf_in(V_inet_pfil_head, mp, ifp, NULL);
3430 			if (rv != PFIL_PASS)
3431 				break;
3432 		}
3433 
3434 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3435 		    pfil_mbuf_in(V_inet_pfil_head, mp, bifp, NULL)) !=
3436 		    PFIL_PASS)
3437 			break;
3438 
3439 		/* Check if we need to fragment the packet. */
3440 		/* bridge_fragment generates an mbuf chain of packets */
3441 		/* that already include Ethernet headers. */
3442 		if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
3443 			i = (*mp)->m_pkthdr.len;
3444 			if (i > ifp->if_mtu) {
3445 				error = bridge_fragment(ifp, mp, &eh2, snap,
3446 					    &llc1);
3447 				return (error);
3448 			}
3449 		}
3450 
3451 		/* Recalculate the ip checksum. */
3452 		ip = mtod(*mp, struct ip *);
3453 		hlen = ip->ip_hl << 2;
3454 		if (hlen < sizeof(struct ip))
3455 			goto bad;
3456 		if (hlen > (*mp)->m_len) {
3457 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3458 				goto bad;
3459 			ip = mtod(*mp, struct ip *);
3460 			if (ip == NULL)
3461 				goto bad;
3462 		}
3463 		ip->ip_sum = 0;
3464 		if (hlen == sizeof(struct ip))
3465 			ip->ip_sum = in_cksum_hdr(ip);
3466 		else
3467 			ip->ip_sum = in_cksum(*mp, hlen);
3468 
3469 		break;
3470 #ifdef INET6
3471 	case ETHERTYPE_IPV6:
3472 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3473 		    pfil_mbuf_out(V_inet6_pfil_head, mp, bifp, NULL)) !=
3474 		    PFIL_PASS)
3475 			break;
3476 
3477 		if (V_pfil_member && ifp != NULL) {
3478 			rv = (dir == PFIL_OUT) ?
3479 			    pfil_mbuf_out(V_inet6_pfil_head, mp, ifp, NULL) :
3480 			    pfil_mbuf_in(V_inet6_pfil_head, mp, ifp, NULL);
3481 			if (rv != PFIL_PASS)
3482 				break;
3483 		}
3484 
3485 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3486 		    pfil_mbuf_in(V_inet6_pfil_head, mp, bifp, NULL)) !=
3487 		    PFIL_PASS)
3488 			break;
3489 		break;
3490 #endif
3491 	}
3492 
3493 	switch (rv) {
3494 	case PFIL_CONSUMED:
3495 		return (0);
3496 	case PFIL_DROPPED:
3497 		return (EACCES);
3498 	default:
3499 		break;
3500 	}
3501 
3502 	error = -1;
3503 
3504 	/*
3505 	 * Finally, put everything back the way it was and return
3506 	 */
3507 	if (snap) {
3508 		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
3509 		if (*mp == NULL)
3510 			return (error);
3511 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3512 	}
3513 
3514 	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
3515 	if (*mp == NULL)
3516 		return (error);
3517 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3518 
3519 	return (0);
3520 
3521 bad:
3522 	m_freem(*mp);
3523 	*mp = NULL;
3524 	return (error);
3525 }
3526 
3527 /*
3528  * Perform basic checks on header size since
3529  * pfil assumes ip_input has already processed
3530  * it.  Cut-and-pasted from ip_input.c.
3531  * Given how simple the IPv6 version is,
3532  * does the IPv4 version really need to be
3533  * this complicated?
3534  *
3535  * XXX Should we update ipstat here, or not?
3536  * XXX Right now we update ipstat but not
3537  * XXX csum_counter.
3538  */
3539 static int
3540 bridge_ip_checkbasic(struct mbuf **mp)
3541 {
3542 	struct mbuf *m = *mp;
3543 	struct ip *ip;
3544 	int len, hlen;
3545 	u_short sum;
3546 
3547 	if (*mp == NULL)
3548 		return (-1);
3549 
3550 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3551 		if ((m = m_copyup(m, sizeof(struct ip),
3552 			(max_linkhdr + 3) & ~3)) == NULL) {
3553 			/* XXXJRT new stat, please */
3554 			KMOD_IPSTAT_INC(ips_toosmall);
3555 			goto bad;
3556 		}
3557 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3558 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3559 			KMOD_IPSTAT_INC(ips_toosmall);
3560 			goto bad;
3561 		}
3562 	}
3563 	ip = mtod(m, struct ip *);
3564 	if (ip == NULL) goto bad;
3565 
3566 	if (ip->ip_v != IPVERSION) {
3567 		KMOD_IPSTAT_INC(ips_badvers);
3568 		goto bad;
3569 	}
3570 	hlen = ip->ip_hl << 2;
3571 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3572 		KMOD_IPSTAT_INC(ips_badhlen);
3573 		goto bad;
3574 	}
3575 	if (hlen > m->m_len) {
3576 		if ((m = m_pullup(m, hlen)) == NULL) {
3577 			KMOD_IPSTAT_INC(ips_badhlen);
3578 			goto bad;
3579 		}
3580 		ip = mtod(m, struct ip *);
3581 		if (ip == NULL) goto bad;
3582 	}
3583 
3584 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3585 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3586 	} else {
3587 		if (hlen == sizeof(struct ip)) {
3588 			sum = in_cksum_hdr(ip);
3589 		} else {
3590 			sum = in_cksum(m, hlen);
3591 		}
3592 	}
3593 	if (sum) {
3594 		KMOD_IPSTAT_INC(ips_badsum);
3595 		goto bad;
3596 	}
3597 
3598 	/* Retrieve the packet length. */
3599 	len = ntohs(ip->ip_len);
3600 
3601 	/*
3602 	 * Check for additional length bogosity
3603 	 */
3604 	if (len < hlen) {
3605 		KMOD_IPSTAT_INC(ips_badlen);
3606 		goto bad;
3607 	}
3608 
3609 	/*
3610 	 * Check that the amount of data in the buffers
3611 	 * is at least as much as the IP header would have us expect.
3612 	 * Drop packet if shorter than we expect.
3613 	 */
3614 	if (m->m_pkthdr.len < len) {
3615 		KMOD_IPSTAT_INC(ips_tooshort);
3616 		goto bad;
3617 	}
3618 
3619 	/* Checks out, proceed */
3620 	*mp = m;
3621 	return (0);
3622 
3623 bad:
3624 	*mp = m;
3625 	return (-1);
3626 }
3627 
3628 #ifdef INET6
3629 /*
3630  * Same as above, but for IPv6.
3631  * Cut-and-pasted from ip6_input.c.
3632  * XXX Should we update ip6stat, or not?
3633  */
3634 static int
3635 bridge_ip6_checkbasic(struct mbuf **mp)
3636 {
3637 	struct mbuf *m = *mp;
3638 	struct ip6_hdr *ip6;
3639 
3640 	/*
3641 	 * If the IPv6 header is not aligned, slurp it up into a new
3642 	 * mbuf with space for link headers, in the event we forward
3643 	 * it.  Otherwise, if it is aligned, make sure the entire base
3644 	 * IPv6 header is in the first mbuf of the chain.
3645 	 */
3646 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3647 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3648 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3649 			    (max_linkhdr + 3) & ~3)) == NULL) {
3650 			/* XXXJRT new stat, please */
3651 			IP6STAT_INC(ip6s_toosmall);
3652 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3653 			goto bad;
3654 		}
3655 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3656 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3657 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3658 			IP6STAT_INC(ip6s_toosmall);
3659 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3660 			goto bad;
3661 		}
3662 	}
3663 
3664 	ip6 = mtod(m, struct ip6_hdr *);
3665 
3666 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3667 		IP6STAT_INC(ip6s_badvers);
3668 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3669 		goto bad;
3670 	}
3671 
3672 	/* Checks out, proceed */
3673 	*mp = m;
3674 	return (0);
3675 
3676 bad:
3677 	*mp = m;
3678 	return (-1);
3679 }
3680 #endif /* INET6 */
3681 
3682 /*
3683  * bridge_fragment:
3684  *
3685  *	Fragment an mbuf chain into multiple packets and prepend the Ethernet header.
3686  */
3687 static int
3688 bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
3689     int snap, struct llc *llc)
3690 {
3691 	struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
3692 	struct ip *ip;
3693 	int error = -1;
3694 
3695 	if (m->m_len < sizeof(struct ip) &&
3696 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3697 		goto dropit;
3698 	ip = mtod(m, struct ip *);
3699 
3700 	m->m_pkthdr.csum_flags |= CSUM_IP;
3701 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
3702 	if (error)
3703 		goto dropit;
3704 
3705 	/*
3706 	 * Walk the chain and re-add the Ethernet header for
3707 	 * each mbuf packet.
3708 	 */
3709 	for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
3710 		nextpkt = mcur->m_nextpkt;
3711 		mcur->m_nextpkt = NULL;
3712 		if (snap) {
3713 			M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
3714 			if (mcur == NULL) {
3715 				error = ENOBUFS;
3716 				if (mprev != NULL)
3717 					mprev->m_nextpkt = nextpkt;
3718 				goto dropit;
3719 			}
3720 			bcopy(llc, mtod(mcur, caddr_t), sizeof(struct llc));
3721 		}
3722 
3723 		M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
3724 		if (mcur == NULL) {
3725 			error = ENOBUFS;
3726 			if (mprev != NULL)
3727 				mprev->m_nextpkt = nextpkt;
3728 			goto dropit;
3729 		}
3730 		bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);
3731 
3732 		/*
3733 		 * The previous two M_PREPEND could have inserted one or two
3734 		 * mbufs in front so we have to update the previous packet's
3735 		 * m_nextpkt.
3736 		 */
3737 		mcur->m_nextpkt = nextpkt;
3738 		if (mprev != NULL)
3739 			mprev->m_nextpkt = mcur;
3740 		else {
3741 			/* The first mbuf in the original chain needs to be
3742 			 * updated. */
3743 			*mp = mcur;
3744 		}
3745 		mprev = mcur;
3746 	}
3747 
3748 	KMOD_IPSTAT_INC(ips_fragmented);
3749 	return (error);
3750 
3751 dropit:
3752 	for (mcur = *mp; mcur; mcur = m) { /* dropping the full packet chain */
3753 		m = mcur->m_nextpkt;
3754 		m_freem(mcur);
3755 	}
3756 	return (error);
3757 }
3758 
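/*
 * bridge_linkstate:
 *
 *	Callback for link state changes on a member interface; recompute
 *	the bridge's own link state and let the STP code know.
 */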
3759 static void
3760 bridge_linkstate(struct ifnet *ifp)
3761 {
3762 	struct bridge_softc *sc = ifp->if_bridge;
3763 	struct bridge_iflist *bif;
3764 	struct epoch_tracker et;
3765 
3766 	NET_EPOCH_ENTER(et);
3767 
3768 	bif = bridge_lookup_member_if(sc, ifp);
3769 	if (bif == NULL) {
3770 		NET_EPOCH_EXIT(et);
3771 		return;
3772 	}
3773 	bridge_linkcheck(sc);
3774 
3775 	bstp_linkstate(&bif->bif_stp);
3776 
3777 	NET_EPOCH_EXIT(et);
3778 }
3779 
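/*
 * bridge_linkcheck:
 *
 *	Recompute the bridge interface's link state: up if any member
 *	reports link up, or if no member supports link state reporting at
 *	all; down otherwise.
 */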
3780 static void
3781 bridge_linkcheck(struct bridge_softc *sc)
3782 {
3783 	struct bridge_iflist *bif;
3784 	int new_link, hasls;
3785 
3786 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
3787 
3788 	new_link = LINK_STATE_DOWN;
3789 	hasls = 0;
3790 	/* Our link is considered up if at least one of our ports is active */
3791 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
3792 		if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
3793 			hasls++;
3794 		if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
3795 			new_link = LINK_STATE_UP;
3796 			break;
3797 		}
3798 	}
3799 	if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
3800 		/* If no interfaces support link-state then we default to up */
3801 		new_link = LINK_STATE_UP;
3802 	}
3803 	if_link_state_change(sc->sc_ifp, new_link);
3804 }
3805