xref: /freebsd/sys/net/if_bridge.c (revision 1e413cf93298b5b97441a21d9a50fdcd0ee9945e)
1 /*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Jason L. Wright
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66  * POSSIBILITY OF SUCH DAMAGE.
67  *
68  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69  */
70 
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *	- Currently only supports Ethernet-like interfaces (Ethernet,
77  *	  802.11, VLANs on Ethernet, etc.).  Figure out a nice way
78  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogeneous bridges).
80  */
81 
82 #include <sys/cdefs.h>
83 __FBSDID("$FreeBSD$");
84 
85 #include "opt_inet.h"
86 #include "opt_inet6.h"
87 #include "opt_carp.h"
88 
89 #include <sys/param.h>
90 #include <sys/mbuf.h>
91 #include <sys/malloc.h>
92 #include <sys/protosw.h>
93 #include <sys/systm.h>
94 #include <sys/time.h>
95 #include <sys/socket.h> /* for net/if.h */
96 #include <sys/sockio.h>
97 #include <sys/ctype.h>  /* character classification */
98 #include <sys/kernel.h>
99 #include <sys/random.h>
100 #include <sys/syslog.h>
101 #include <sys/sysctl.h>
102 #include <vm/uma.h>
103 #include <sys/module.h>
104 #include <sys/priv.h>
105 #include <sys/proc.h>
106 #include <sys/lock.h>
107 #include <sys/mutex.h>
108 
109 #include <net/bpf.h>
110 #include <net/if.h>
111 #include <net/if_clone.h>
112 #include <net/if_dl.h>
113 #include <net/if_types.h>
114 #include <net/if_var.h>
115 #include <net/pfil.h>
116 
117 #include <netinet/in.h> /* for struct arpcom */
118 #include <netinet/in_systm.h>
119 #include <netinet/in_var.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #ifdef INET6
123 #include <netinet/ip6.h>
124 #include <netinet6/ip6_var.h>
125 #endif
126 #ifdef DEV_CARP
127 #include <netinet/ip_carp.h>
128 #endif
129 #include <machine/in_cksum.h>
130 #include <netinet/if_ether.h> /* for struct arpcom */
131 #include <net/bridgestp.h>
132 #include <net/if_bridgevar.h>
133 #include <net/if_llc.h>
134 #include <net/if_vlan_var.h>
135 
136 #include <net/route.h>
137 #include <netinet/ip_fw.h>
138 #include <netinet/ip_dummynet.h>
139 
140 /*
141  * Size of the route hash table.  Must be a power of two.
142  */
143 #ifndef BRIDGE_RTHASH_SIZE
144 #define	BRIDGE_RTHASH_SIZE		1024
145 #endif
146 
147 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
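/*
 * Because the table size is a power of two, the hash can be reduced to a
 * bucket index with a mask rather than a modulo:
 *
 *	bucket = hash & BRIDGE_RTHASH_MASK
 *
 * which is equivalent to (hash % BRIDGE_RTHASH_SIZE).
 */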
148 
149 /*
150  * Maximum number of addresses to cache.
151  */
152 #ifndef BRIDGE_RTABLE_MAX
153 #define	BRIDGE_RTABLE_MAX		100
154 #endif
155 
156 /*
157  * Timeout (in seconds) for entries learned dynamically.
158  */
159 #ifndef BRIDGE_RTABLE_TIMEOUT
160 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
161 #endif
162 
163 /*
164  * Number of seconds between walks of the route list.
165  */
166 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
167 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
168 #endif
169 
170 /*
171  * List of capabilities to mask on the member interface.
172  */
173 #define	BRIDGE_IFCAPS_MASK		IFCAP_TXCSUM
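/*
 * Only IFCAP_TXCSUM is masked, presumably because a frame that enters the
 * bridge on one member can be forwarded out a different member, so the
 * originating interface's transmit checksum offload cannot be relied upon
 * to fill in the checksum.
 */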
174 
175 /*
176  * Bridge interface list entry.
177  */
178 struct bridge_iflist {
179 	LIST_ENTRY(bridge_iflist) bif_next;
180 	struct ifnet		*bif_ifp;	/* member if */
181 	struct bstp_port	bif_stp;	/* STP state */
182 	uint32_t		bif_flags;	/* member if flags */
183 	int			bif_mutecap;	/* member muted caps */
184 	uint32_t		bif_addrmax;	/* max # of addresses */
185 	uint32_t		bif_addrcnt;	/* cur. # of addresses */
186 	uint32_t		bif_addrexceeded;/* # of address violations */
187 };
188 
189 /*
190  * Bridge route node.
191  */
192 struct bridge_rtnode {
193 	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
194 	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
195 	struct bridge_iflist	*brt_dst;	/* destination if */
196 	unsigned long		brt_expire;	/* expiration time */
197 	uint8_t			brt_flags;	/* address flags */
198 	uint8_t			brt_addr[ETHER_ADDR_LEN];
199 	uint16_t		brt_vlan;	/* vlan id */
200 };
201 #define	brt_ifp			brt_dst->bif_ifp
202 
203 /*
204  * Software state for each bridge.
205  */
206 struct bridge_softc {
207 	struct ifnet		*sc_ifp;	/* the bridge's own ifnet */
208 	LIST_ENTRY(bridge_softc) sc_list;
209 	struct mtx		sc_mtx;
210 	struct cv		sc_cv;
211 	uint32_t		sc_brtmax;	/* max # of addresses */
212 	uint32_t		sc_brtcnt;	/* cur. # of addresses */
213 	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
214 	struct callout		sc_brcallout;	/* bridge callout */
215 	uint32_t		sc_iflist_ref;	/* refcount for sc_iflist */
216 	uint32_t		sc_iflist_xcnt;	/* exclusive-access count for sc_iflist */
217 	LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
218 	LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
219 	LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
220 	uint32_t		sc_rthash_key;	/* key for hash */
221 	LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
222 	struct bstp_state	sc_stp;		/* STP state */
223 	uint32_t		sc_brtexceeded;	/* # of cache drops */
224 };
225 
226 static struct mtx 	bridge_list_mtx;
227 eventhandler_tag	bridge_detach_cookie = NULL;
228 
229 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
230 
231 uma_zone_t bridge_rtnode_zone;
232 
233 static int	bridge_clone_create(struct if_clone *, int, caddr_t);
234 static void	bridge_clone_destroy(struct ifnet *);
235 
236 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
237 static void	bridge_mutecaps(struct bridge_iflist *, int);
238 static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
239 static void	bridge_init(void *);
240 static void	bridge_dummynet(struct mbuf *, struct ifnet *);
241 static void	bridge_stop(struct ifnet *, int);
242 static void	bridge_start(struct ifnet *);
243 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
244 static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
245 		    struct rtentry *);
246 static void	bridge_enqueue(struct bridge_softc *, struct ifnet *,
247 		    struct mbuf *);
248 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
249 
250 static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
251 		    struct mbuf *m);
252 
253 static void	bridge_timer(void *);
254 
255 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
256 		    struct mbuf *, int);
257 static void	bridge_span(struct bridge_softc *, struct mbuf *);
258 
259 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
260 		    uint16_t, struct bridge_iflist *, int, uint8_t);
261 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
262 		    uint16_t);
263 static void	bridge_rttrim(struct bridge_softc *);
264 static void	bridge_rtage(struct bridge_softc *);
265 static void	bridge_rtflush(struct bridge_softc *, int);
266 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
267 		    uint16_t);
268 
269 static int	bridge_rtable_init(struct bridge_softc *);
270 static void	bridge_rtable_fini(struct bridge_softc *);
271 
272 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
273 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
274 		    const uint8_t *, uint16_t);
275 static int	bridge_rtnode_insert(struct bridge_softc *,
276 		    struct bridge_rtnode *);
277 static void	bridge_rtnode_destroy(struct bridge_softc *,
278 		    struct bridge_rtnode *);
279 static void	bridge_rtable_expire(struct ifnet *, int);
280 static void	bridge_state_change(struct ifnet *, int);
281 
282 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
283 		    const char *name);
284 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
285 		    struct ifnet *ifp);
286 static void	bridge_delete_member(struct bridge_softc *,
287 		    struct bridge_iflist *, int);
288 static void	bridge_delete_span(struct bridge_softc *,
289 		    struct bridge_iflist *);
290 
291 static int	bridge_ioctl_add(struct bridge_softc *, void *);
292 static int	bridge_ioctl_del(struct bridge_softc *, void *);
293 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
294 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
295 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
296 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
297 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
298 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
299 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
300 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
301 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
302 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
303 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
304 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
305 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
306 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
307 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
308 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
309 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
310 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
311 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
312 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
313 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
314 static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
315 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
316 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
317 static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
318 static int	bridge_ioctl_grte(struct bridge_softc *, void *);
319 static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
320 static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
321 static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
322 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
323 		    int);
324 static int	bridge_ip_checkbasic(struct mbuf **mp);
325 #ifdef INET6
326 static int	bridge_ip6_checkbasic(struct mbuf **mp);
327 #endif /* INET6 */
328 static int	bridge_fragment(struct ifnet *, struct mbuf *,
329 		    struct ether_header *, int, struct llc *);
330 
331 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
332 #define	VLANTAGOF(_m)	\
333     (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1
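/*
 * The forwarding and output paths below use this as
 *
 *	vlan = VLANTAGOF(m);
 *
 * which yields the 802.1Q VLAN ID carried in the mbuf packet header for
 * tagged frames, or 1 (the default VLAN) for untagged ones.
 */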
334 
335 static struct bstp_cb_ops bridge_ops = {
336 	.bcb_state = bridge_state_change,
337 	.bcb_rtage = bridge_rtable_expire
338 };
339 
340 SYSCTL_DECL(_net_link);
341 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
342 
343 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
344 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
345 static int pfil_member = 1; /* run pfil hooks on the member interface */
346 static int pfil_ipfw = 0;   /* layer2 filter with ipfw */
347 static int pfil_ipfw_arp = 0;   /* layer2 filter with ipfw */
348 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for
349                                    locally destined packets */
350 static int log_stp   = 0;   /* log STP state changes */
351 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
352     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
353 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW,
354     &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
355 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
356     &pfil_bridge, 0, "Packet filter on the bridge interface");
357 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
358     &pfil_member, 0, "Packet filter on the member interface");
359 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW,
360     &pfil_local_phys, 0,
361     "Packet filter on the physical interface for locally destined packets");
362 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
363     &log_stp, 0, "Log STP state changes");
364 
365 struct bridge_control {
366 	int	(*bc_func)(struct bridge_softc *, void *);
367 	int	bc_argsize;
368 	int	bc_flags;
369 };
370 
371 #define	BC_F_COPYIN		0x01	/* copy arguments in */
372 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
373 #define	BC_F_SUSER		0x04	/* do super-user check */
374 
375 const struct bridge_control bridge_control_table[] = {
376 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
377 	  BC_F_COPYIN|BC_F_SUSER },
378 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
379 	  BC_F_COPYIN|BC_F_SUSER },
380 
381 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
382 	  BC_F_COPYIN|BC_F_COPYOUT },
383 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
384 	  BC_F_COPYIN|BC_F_SUSER },
385 
386 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
387 	  BC_F_COPYIN|BC_F_SUSER },
388 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
389 	  BC_F_COPYOUT },
390 
391 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
392 	  BC_F_COPYIN|BC_F_COPYOUT },
393 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
394 	  BC_F_COPYIN|BC_F_COPYOUT },
395 
396 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
397 	  BC_F_COPYIN|BC_F_SUSER },
398 
399 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
400 	  BC_F_COPYIN|BC_F_SUSER },
401 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
402 	  BC_F_COPYOUT },
403 
404 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
405 	  BC_F_COPYIN|BC_F_SUSER },
406 
407 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
408 	  BC_F_COPYIN|BC_F_SUSER },
409 
410 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
411 	  BC_F_COPYOUT },
412 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
413 	  BC_F_COPYIN|BC_F_SUSER },
414 
415 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
416 	  BC_F_COPYOUT },
417 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
418 	  BC_F_COPYIN|BC_F_SUSER },
419 
420 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
421 	  BC_F_COPYOUT },
422 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
423 	  BC_F_COPYIN|BC_F_SUSER },
424 
425 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
426 	  BC_F_COPYOUT },
427 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
428 	  BC_F_COPYIN|BC_F_SUSER },
429 
430 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
431 	  BC_F_COPYIN|BC_F_SUSER },
432 
433 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
434 	  BC_F_COPYIN|BC_F_SUSER },
435 
436 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
437 	  BC_F_COPYIN|BC_F_SUSER },
438 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
439 	  BC_F_COPYIN|BC_F_SUSER },
440 
441 	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
442 	  BC_F_COPYOUT },
443 
444 	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
445 	  BC_F_COPYOUT },
446 
447 	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
448 	  BC_F_COPYIN|BC_F_COPYOUT },
449 
450 	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
451 	  BC_F_COPYIN|BC_F_SUSER },
452 
453 	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
454 	  BC_F_COPYIN|BC_F_SUSER },
455 
456 	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
457 	  BC_F_COPYIN|BC_F_SUSER },
458 
459 };
460 const int bridge_control_table_size =
461     sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
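/*
 * None of these handlers has its own ioctl number; userland wraps the
 * argument structure in a struct ifdrv and issues SIOCSDRVSPEC (set) or
 * SIOCGDRVSPEC (get), with ifd_cmd selecting an entry of this table.  A
 * rough, hypothetical sketch of adding a member, along the lines of what
 * ifconfig(8) does (the BRDG* command values come from net/if_bridgevar.h,
 * and sock is any open socket descriptor):
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(sock, SIOCSDRVSPEC, &ifd);
 *
 * bridge_ioctl() below checks ifd_cmd against the table size, verifies that
 * ifd_len matches bc_argsize, and honours the BC_F_* flags before calling
 * bc_func.
 */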
462 
463 LIST_HEAD(, bridge_softc) bridge_list;
464 
465 IFC_SIMPLE_DECLARE(bridge, 0);
466 
467 static int
468 bridge_modevent(module_t mod, int type, void *data)
469 {
470 
471 	switch (type) {
472 	case MOD_LOAD:
473 		mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF);
474 		if_clone_attach(&bridge_cloner);
475 		bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
476 		    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
477 		    UMA_ALIGN_PTR, 0);
478 		LIST_INIT(&bridge_list);
479 		bridge_input_p = bridge_input;
480 		bridge_output_p = bridge_output;
481 		bridge_dn_p = bridge_dummynet;
482 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
483 		    ifnet_departure_event, bridge_ifdetach, NULL,
484 		    EVENTHANDLER_PRI_ANY);
485 		break;
486 	case MOD_UNLOAD:
487 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
488 		    bridge_detach_cookie);
489 		if_clone_detach(&bridge_cloner);
490 		uma_zdestroy(bridge_rtnode_zone);
491 		bridge_input_p = NULL;
492 		bridge_output_p = NULL;
493 		bridge_dn_p = NULL;
494 		mtx_destroy(&bridge_list_mtx);
495 		break;
496 	default:
497 		return (EOPNOTSUPP);
498 	}
499 	return (0);
500 }
501 
502 static moduledata_t bridge_mod = {
503 	"if_bridge",
504 	bridge_modevent,
505 	0
506 };
507 
508 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
509 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
510 
511 /*
512  * Handler for the net.link.bridge.ipfw sysctl (backing variable pfil_ipfw).
513  */
514 static int
515 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
516 {
517 	int enable = pfil_ipfw;
518 	int error;
519 
520 	error = sysctl_handle_int(oidp, &enable, 0, req);
521 	enable = (enable) ? 1 : 0;
522 
523 	if (enable != pfil_ipfw) {
524 		pfil_ipfw = enable;
525 
526 		/*
527 		 * Disable pfil so that ipfw doesn't run twice.  If the user
528 		 * really wants both, they can re-enable pfil_bridge and/or
529 		 * pfil_member.  Also allow non-IP packets, as ipfw can filter
530 		 * by layer2 type.
531 		 */
532 		if (pfil_ipfw) {
533 			pfil_onlyip = 0;
534 			pfil_bridge = 0;
535 			pfil_member = 0;
536 		}
537 	}
538 
539 	return (error);
540 }
541 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
542 	    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
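/*
 * From userland this is "sysctl net.link.bridge.ipfw=1".  As the handler
 * above shows, turning it on also clears pfil_onlyip, pfil_bridge and
 * pfil_member so that ipfw does not see the same packet twice; re-enable
 * those knobs explicitly if both filters are wanted.
 */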
543 
544 /*
545  * bridge_clone_create:
546  *
547  *	Create a new bridge instance.
548  */
549 static int
550 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
551 {
552 	struct bridge_softc *sc, *sc2;
553 	struct ifnet *bifp, *ifp;
554 	u_char eaddr[6];
555 	int retry;
556 
557 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
558 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
559 	if (ifp == NULL) {
560 		free(sc, M_DEVBUF);
561 		return (ENOSPC);
562 	}
563 
564 	BRIDGE_LOCK_INIT(sc);
565 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
566 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
567 
568 	/* Initialize our routing table. */
569 	bridge_rtable_init(sc);
570 
571 	callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);
572 
573 	LIST_INIT(&sc->sc_iflist);
574 	LIST_INIT(&sc->sc_spanlist);
575 
576 	ifp->if_softc = sc;
577 	if_initname(ifp, ifc->ifc_name, unit);
578 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
579 	ifp->if_ioctl = bridge_ioctl;
580 	ifp->if_start = bridge_start;
581 	ifp->if_init = bridge_init;
582 	ifp->if_type = IFT_BRIDGE;
583 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
584 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
585 	IFQ_SET_READY(&ifp->if_snd);
586 
587 	/*
588 	 * Generate a random Ethernet address and set the locally
589 	 * administered bit.
590 	 *
591 	 * Since we are using random Ethernet addresses for the bridge, it is
592 	 * possible that we might have address collisions, so make sure that
593 	 * this hardware address isn't already in use on another bridge.
594 	 */
595 	for (retry = 1; retry != 0;) {
596 		arc4rand(eaddr, ETHER_ADDR_LEN, 1);
597 		eaddr[0] &= ~1;		/* clear multicast bit */
598 		eaddr[0] |= 2;		/* set the LAA bit */
599 		retry = 0;
600 		mtx_lock(&bridge_list_mtx);
601 		LIST_FOREACH(sc2, &bridge_list, sc_list) {
602 			bifp = sc2->sc_ifp;
603 			if (memcmp(eaddr, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0)
604 				retry = 1;
605 		}
606 		mtx_unlock(&bridge_list_mtx);
607 	}
608 
609 	bstp_attach(&sc->sc_stp, &bridge_ops);
610 	ether_ifattach(ifp, eaddr);
611 	/* Now undo some of the damage... */
612 	ifp->if_baudrate = 0;
613 	ifp->if_type = IFT_BRIDGE;
614 
615 	mtx_lock(&bridge_list_mtx);
616 	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
617 	mtx_unlock(&bridge_list_mtx);
618 
619 	return (0);
620 }
621 
622 /*
623  * bridge_clone_destroy:
624  *
625  *	Destroy a bridge instance.
626  */
627 static void
628 bridge_clone_destroy(struct ifnet *ifp)
629 {
630 	struct bridge_softc *sc = ifp->if_softc;
631 	struct bridge_iflist *bif;
632 
633 	BRIDGE_LOCK(sc);
634 
635 	bridge_stop(ifp, 1);
636 	ifp->if_flags &= ~IFF_UP;
637 
638 	while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
639 		bridge_delete_member(sc, bif, 0);
640 
641 	while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) {
642 		bridge_delete_span(sc, bif);
643 	}
644 
645 	BRIDGE_UNLOCK(sc);
646 
647 	callout_drain(&sc->sc_brcallout);
648 
649 	mtx_lock(&bridge_list_mtx);
650 	LIST_REMOVE(sc, sc_list);
651 	mtx_unlock(&bridge_list_mtx);
652 
653 	bstp_detach(&sc->sc_stp);
654 	ether_ifdetach(ifp);
655 	if_free_type(ifp, IFT_ETHER);
656 
657 	/* Tear down the routing table. */
658 	bridge_rtable_fini(sc);
659 
660 	BRIDGE_LOCK_DESTROY(sc);
661 	free(sc, M_DEVBUF);
662 }
663 
664 /*
665  * bridge_ioctl:
666  *
667  *	Handle a control request from the operator.
668  */
669 static int
670 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
671 {
672 	struct bridge_softc *sc = ifp->if_softc;
673 	struct thread *td = curthread;
674 	union {
675 		struct ifbreq ifbreq;
676 		struct ifbifconf ifbifconf;
677 		struct ifbareq ifbareq;
678 		struct ifbaconf ifbaconf;
679 		struct ifbrparam ifbrparam;
680 		struct ifbropreq ifbropreq;
681 	} args;
682 	struct ifdrv *ifd = (struct ifdrv *) data;
683 	const struct bridge_control *bc;
684 	int error = 0;
685 
686 	switch (cmd) {
687 
688 	case SIOCADDMULTI:
689 	case SIOCDELMULTI:
690 		break;
691 
692 	case SIOCGDRVSPEC:
693 	case SIOCSDRVSPEC:
694 		if (ifd->ifd_cmd >= bridge_control_table_size) {
695 			error = EINVAL;
696 			break;
697 		}
698 		bc = &bridge_control_table[ifd->ifd_cmd];
699 
700 		if (cmd == SIOCGDRVSPEC &&
701 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
702 			error = EINVAL;
703 			break;
704 		}
705 		else if (cmd == SIOCSDRVSPEC &&
706 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
707 			error = EINVAL;
708 			break;
709 		}
710 
711 		if (bc->bc_flags & BC_F_SUSER) {
712 			error = priv_check(td, PRIV_NET_BRIDGE);
713 			if (error)
714 				break;
715 		}
716 
717 		if (ifd->ifd_len != bc->bc_argsize ||
718 		    ifd->ifd_len > sizeof(args)) {
719 			error = EINVAL;
720 			break;
721 		}
722 
723 		bzero(&args, sizeof(args));
724 		if (bc->bc_flags & BC_F_COPYIN) {
725 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
726 			if (error)
727 				break;
728 		}
729 
730 		BRIDGE_LOCK(sc);
731 		error = (*bc->bc_func)(sc, &args);
732 		BRIDGE_UNLOCK(sc);
733 		if (error)
734 			break;
735 
736 		if (bc->bc_flags & BC_F_COPYOUT)
737 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
738 
739 		break;
740 
741 	case SIOCSIFFLAGS:
742 		if (!(ifp->if_flags & IFF_UP) &&
743 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
744 			/*
745 			 * If interface is marked down and it is running,
746 			 * then stop and disable it.
747 			 */
748 			BRIDGE_LOCK(sc);
749 			bridge_stop(ifp, 1);
750 			BRIDGE_UNLOCK(sc);
751 		} else if ((ifp->if_flags & IFF_UP) &&
752 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
753 			/*
754 			 * If interface is marked up and it is stopped, then
755 			 * start it.
756 			 */
757 			(*ifp->if_init)(sc);
758 		}
759 		break;
760 
761 	case SIOCSIFMTU:
762 		/* Do not allow the MTU to be changed on the bridge */
763 		error = EINVAL;
764 		break;
765 
766 	default:
767 		/*
768 		 * drop the lock as ether_ioctl() will call bridge_start() and
769 		 * cause the lock to be recursed.
770 		 */
771 		error = ether_ioctl(ifp, cmd, data);
772 		break;
773 	}
774 
775 	return (error);
776 }
777 
778 /*
779  * bridge_mutecaps:
780  *
781  *	Clear or restore unwanted capabilities on the member interface.
782  */
783 static void
784 bridge_mutecaps(struct bridge_iflist *bif, int mute)
785 {
786 	struct ifnet *ifp = bif->bif_ifp;
787 	struct ifreq ifr;
788 	int error;
789 
790 	if (ifp->if_ioctl == NULL)
791 		return;
792 
793 	bzero(&ifr, sizeof(ifr));
794 	ifr.ifr_reqcap = ifp->if_capenable;
795 
796 	if (mute) {
797 		/* mask off and save capabilities */
798 		bif->bif_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
799 		if (bif->bif_mutecap != 0)
800 			ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
801 	} else
802 		/* restore muted capabilities */
803 		ifr.ifr_reqcap |= bif->bif_mutecap;
804 
805 
806 	if (bif->bif_mutecap != 0) {
807 		IFF_LOCKGIANT(ifp);
808 		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
809 		IFF_UNLOCKGIANT(ifp);
810 	}
811 }
812 
813 /*
814  * bridge_lookup_member:
815  *
816  *	Look up a bridge member interface.
817  */
818 static struct bridge_iflist *
819 bridge_lookup_member(struct bridge_softc *sc, const char *name)
820 {
821 	struct bridge_iflist *bif;
822 	struct ifnet *ifp;
823 
824 	BRIDGE_LOCK_ASSERT(sc);
825 
826 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
827 		ifp = bif->bif_ifp;
828 		if (strcmp(ifp->if_xname, name) == 0)
829 			return (bif);
830 	}
831 
832 	return (NULL);
833 }
834 
835 /*
836  * bridge_lookup_member_if:
837  *
838  *	Look up a bridge member interface by ifnet*.
839  */
840 static struct bridge_iflist *
841 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
842 {
843 	struct bridge_iflist *bif;
844 
845 	BRIDGE_LOCK_ASSERT(sc);
846 
847 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
848 		if (bif->bif_ifp == member_ifp)
849 			return (bif);
850 	}
851 
852 	return (NULL);
853 }
854 
855 /*
856  * bridge_delete_member:
857  *
858  *	Delete the specified member interface.
859  */
860 static void
861 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
862     int gone)
863 {
864 	struct ifnet *ifs = bif->bif_ifp;
865 
866 	BRIDGE_LOCK_ASSERT(sc);
867 
868 	if (!gone) {
869 		switch (ifs->if_type) {
870 		case IFT_ETHER:
871 		case IFT_L2VLAN:
872 			/*
873 			 * Take the interface out of promiscuous mode.
874 			 */
875 			(void) ifpromisc(ifs, 0);
876 			bridge_mutecaps(bif, 0);
877 			break;
878 
879 		case IFT_GIF:
880 			break;
881 
882 		default:
883 #ifdef DIAGNOSTIC
884 			panic("bridge_delete_member: impossible");
885 #endif
886 			break;
887 		}
888 	}
889 
890 	if (bif->bif_flags & IFBIF_STP)
891 		bstp_disable(&bif->bif_stp);
892 
893 	ifs->if_bridge = NULL;
894 	BRIDGE_XLOCK(sc);
895 	LIST_REMOVE(bif, bif_next);
896 	BRIDGE_XDROP(sc);
897 
898 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
899 	KASSERT(bif->bif_addrcnt == 0,
900 	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
901 
902 	BRIDGE_UNLOCK(sc);
903 	bstp_destroy(&bif->bif_stp);	/* prepare to free */
904 	BRIDGE_LOCK(sc);
905 	free(bif, M_DEVBUF);
906 }
907 
908 /*
909  * bridge_delete_span:
910  *
911  *	Delete the specified span interface.
912  */
913 static void
914 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
915 {
916 	BRIDGE_LOCK_ASSERT(sc);
917 
918 	KASSERT(bif->bif_ifp->if_bridge == NULL,
919 	    ("%s: not a span interface", __func__));
920 
921 	LIST_REMOVE(bif, bif_next);
922 	free(bif, M_DEVBUF);
923 }
924 
925 static int
926 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
927 {
928 	struct ifbreq *req = arg;
929 	struct bridge_iflist *bif = NULL;
930 	struct ifnet *ifs;
931 	int error = 0;
932 
933 	ifs = ifunit(req->ifbr_ifsname);
934 	if (ifs == NULL)
935 		return (ENOENT);
936 
937 	/* If it's in the span list, it can't be a member. */
938 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
939 		if (ifs == bif->bif_ifp)
940 			return (EBUSY);
941 
942 	/* Allow the first Ethernet member to define the MTU */
943 	if (ifs->if_type != IFT_GIF) {
944 		if (LIST_EMPTY(&sc->sc_iflist))
945 			sc->sc_ifp->if_mtu = ifs->if_mtu;
946 		else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
947 			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
948 			    ifs->if_xname);
949 			return (EINVAL);
950 		}
951 	}
952 
953 	if (ifs->if_bridge == sc)
954 		return (EEXIST);
955 
956 	if (ifs->if_bridge != NULL)
957 		return (EBUSY);
958 
959 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
960 	if (bif == NULL)
961 		return (ENOMEM);
962 
963 	bif->bif_ifp = ifs;
964 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
965 
966 	switch (ifs->if_type) {
967 	case IFT_ETHER:
968 	case IFT_L2VLAN:
969 		/*
970 		 * Place the interface into promiscuous mode.
971 		 */
972 		error = ifpromisc(ifs, 1);
973 		if (error)
974 			goto out;
975 
976 		bridge_mutecaps(bif, 1);
977 		break;
978 
979 	case IFT_GIF:
980 		break;
981 
982 	default:
983 		error = EINVAL;
984 		goto out;
985 	}
986 
987 	ifs->if_bridge = sc;
988 	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
989 	/*
990 	 * XXX: XLOCK HERE!?!
991 	 *
992 	 * NOTE: insert_***HEAD*** should be safe for the traversals.
993 	 */
994 	LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
995 
996 out:
997 	if (error) {
998 		if (bif != NULL)
999 			free(bif, M_DEVBUF);
1000 	}
1001 	return (error);
1002 }
1003 
1004 static int
1005 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1006 {
1007 	struct ifbreq *req = arg;
1008 	struct bridge_iflist *bif;
1009 
1010 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1011 	if (bif == NULL)
1012 		return (ENOENT);
1013 
1014 	bridge_delete_member(sc, bif, 0);
1015 
1016 	return (0);
1017 }
1018 
1019 static int
1020 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1021 {
1022 	struct ifbreq *req = arg;
1023 	struct bridge_iflist *bif;
1024 	struct bstp_port *bp;
1025 
1026 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1027 	if (bif == NULL)
1028 		return (ENOENT);
1029 
1030 	bp = &bif->bif_stp;
1031 	req->ifbr_ifsflags = bif->bif_flags;
1032 	req->ifbr_state = bp->bp_state;
1033 	req->ifbr_priority = bp->bp_priority;
1034 	req->ifbr_path_cost = bp->bp_path_cost;
1035 	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1036 	req->ifbr_proto = bp->bp_protover;
1037 	req->ifbr_role = bp->bp_role;
1038 	req->ifbr_stpflags = bp->bp_flags;
1039 	req->ifbr_addrcnt = bif->bif_addrcnt;
1040 	req->ifbr_addrmax = bif->bif_addrmax;
1041 	req->ifbr_addrexceeded = bif->bif_addrexceeded;
1042 
1043 	/* Copy STP state options as flags */
1044 	if (bp->bp_operedge)
1045 		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1046 	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1047 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1048 	if (bp->bp_ptp_link)
1049 		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1050 	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1051 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1052 	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1053 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1054 	if (bp->bp_flags & BSTP_PORT_ADMCOST)
1055 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1056 	return (0);
1057 }
1058 
1059 static int
1060 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1061 {
1062 	struct ifbreq *req = arg;
1063 	struct bridge_iflist *bif;
1064 	struct bstp_port *bp;
1065 	int error;
1066 
1067 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1068 	if (bif == NULL)
1069 		return (ENOENT);
1070 	bp = &bif->bif_stp;
1071 
1072 	if (req->ifbr_ifsflags & IFBIF_SPAN)
1073 		/* SPAN is readonly */
1074 		return (EINVAL);
1075 
1076 	if (req->ifbr_ifsflags & IFBIF_STP) {
1077 		if ((bif->bif_flags & IFBIF_STP) == 0) {
1078 			error = bstp_enable(&bif->bif_stp);
1079 			if (error)
1080 				return (error);
1081 		}
1082 	} else {
1083 		if ((bif->bif_flags & IFBIF_STP) != 0)
1084 			bstp_disable(&bif->bif_stp);
1085 	}
1086 
1087 	/* Pass on STP flags */
1088 	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1089 	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1090 	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1091 	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1092 
1093 	/* Save the bits relating to the bridge */
1094 	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1095 
1096 	return (0);
1097 }
1098 
1099 static int
1100 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1101 {
1102 	struct ifbrparam *param = arg;
1103 
1104 	sc->sc_brtmax = param->ifbrp_csize;
1105 	bridge_rttrim(sc);
1106 
1107 	return (0);
1108 }
1109 
1110 static int
1111 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1112 {
1113 	struct ifbrparam *param = arg;
1114 
1115 	param->ifbrp_csize = sc->sc_brtmax;
1116 
1117 	return (0);
1118 }
1119 
1120 static int
1121 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1122 {
1123 	struct ifbifconf *bifc = arg;
1124 	struct bridge_iflist *bif;
1125 	struct ifbreq breq;
1126 	char *buf, *outbuf;
1127 	int count, buflen, len, error = 0;
1128 
1129 	count = 0;
1130 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1131 		count++;
1132 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1133 		count++;
1134 
1135 	buflen = sizeof(breq) * count;
1136 	if (bifc->ifbic_len == 0) {
1137 		bifc->ifbic_len = buflen;
1138 		return (0);
1139 	}
1140 	BRIDGE_UNLOCK(sc);
1141 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1142 	BRIDGE_LOCK(sc);
1143 
1144 	count = 0;
1145 	buf = outbuf;
1146 	len = min(bifc->ifbic_len, buflen);
1147 	bzero(&breq, sizeof(breq));
1148 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1149 		if (len < sizeof(breq))
1150 			break;
1151 
1152 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1153 		    sizeof(breq.ifbr_ifsname));
1154 		/* Fill in the ifbreq structure */
1155 		error = bridge_ioctl_gifflags(sc, &breq);
1156 		if (error)
1157 			break;
1158 		memcpy(buf, &breq, sizeof(breq));
1159 		count++;
1160 		buf += sizeof(breq);
1161 		len -= sizeof(breq);
1162 	}
1163 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1164 		if (len < sizeof(breq))
1165 			break;
1166 
1167 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1168 		    sizeof(breq.ifbr_ifsname));
1169 		breq.ifbr_ifsflags = bif->bif_flags;
1170 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1171 		memcpy(buf, &breq, sizeof(breq));
1172 		count++;
1173 		buf += sizeof(breq);
1174 		len -= sizeof(breq);
1175 	}
1176 
1177 	BRIDGE_UNLOCK(sc);
1178 	bifc->ifbic_len = sizeof(breq) * count;
1179 	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1180 	BRIDGE_LOCK(sc);
1181 	free(outbuf, M_TEMP);
1182 	return (error);
1183 }
1184 
1185 static int
1186 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1187 {
1188 	struct ifbaconf *bac = arg;
1189 	struct bridge_rtnode *brt;
1190 	struct ifbareq bareq;
1191 	char *buf, *outbuf;
1192 	int count, buflen, len, error = 0;
1193 
1194 	if (bac->ifbac_len == 0)
1195 		return (0);
1196 
1197 	count = 0;
1198 	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1199 		count++;
1200 	buflen = sizeof(bareq) * count;
1201 
1202 	BRIDGE_UNLOCK(sc);
1203 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1204 	BRIDGE_LOCK(sc);
1205 
1206 	count = 0;
1207 	buf = outbuf;
1208 	len = min(bac->ifbac_len, buflen);
1209 	bzero(&bareq, sizeof(bareq));
1210 	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1211 		if (len < sizeof(bareq))
1212 			goto out;
1213 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1214 		    sizeof(bareq.ifba_ifsname));
1215 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1216 		bareq.ifba_vlan = brt->brt_vlan;
1217 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1218 				time_uptime < brt->brt_expire)
1219 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1220 		else
1221 			bareq.ifba_expire = 0;
1222 		bareq.ifba_flags = brt->brt_flags;
1223 
1224 		memcpy(buf, &bareq, sizeof(bareq));
1225 		count++;
1226 		buf += sizeof(bareq);
1227 		len -= sizeof(bareq);
1228 	}
1229 out:
1230 	BRIDGE_UNLOCK(sc);
1231 	bac->ifbac_len = sizeof(bareq) * count;
1232 	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1233 	BRIDGE_LOCK(sc);
1234 	free(outbuf, M_TEMP);
1235 	return (error);
1236 }
1237 
1238 static int
1239 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1240 {
1241 	struct ifbareq *req = arg;
1242 	struct bridge_iflist *bif;
1243 	int error;
1244 
1245 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1246 	if (bif == NULL)
1247 		return (ENOENT);
1248 
1249 	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1250 	    req->ifba_flags);
1251 
1252 	return (error);
1253 }
1254 
1255 static int
1256 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1257 {
1258 	struct ifbrparam *param = arg;
1259 
1260 	sc->sc_brttimeout = param->ifbrp_ctime;
1261 	return (0);
1262 }
1263 
1264 static int
1265 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1266 {
1267 	struct ifbrparam *param = arg;
1268 
1269 	param->ifbrp_ctime = sc->sc_brttimeout;
1270 	return (0);
1271 }
1272 
1273 static int
1274 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1275 {
1276 	struct ifbareq *req = arg;
1277 
1278 	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
1279 }
1280 
1281 static int
1282 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1283 {
1284 	struct ifbreq *req = arg;
1285 
1286 	bridge_rtflush(sc, req->ifbr_ifsflags);
1287 	return (0);
1288 }
1289 
1290 static int
1291 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1292 {
1293 	struct ifbrparam *param = arg;
1294 	struct bstp_state *bs = &sc->sc_stp;
1295 
1296 	param->ifbrp_prio = bs->bs_bridge_priority;
1297 	return (0);
1298 }
1299 
1300 static int
1301 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1302 {
1303 	struct ifbrparam *param = arg;
1304 
1305 	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1306 }
1307 
1308 static int
1309 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1310 {
1311 	struct ifbrparam *param = arg;
1312 	struct bstp_state *bs = &sc->sc_stp;
1313 
1314 	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1315 	return (0);
1316 }
1317 
1318 static int
1319 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1320 {
1321 	struct ifbrparam *param = arg;
1322 
1323 	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1324 }
1325 
1326 static int
1327 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1328 {
1329 	struct ifbrparam *param = arg;
1330 	struct bstp_state *bs = &sc->sc_stp;
1331 
1332 	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1333 	return (0);
1334 }
1335 
1336 static int
1337 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1338 {
1339 	struct ifbrparam *param = arg;
1340 
1341 	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1342 }
1343 
1344 static int
1345 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1346 {
1347 	struct ifbrparam *param = arg;
1348 	struct bstp_state *bs = &sc->sc_stp;
1349 
1350 	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1351 	return (0);
1352 }
1353 
1354 static int
1355 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1356 {
1357 	struct ifbrparam *param = arg;
1358 
1359 	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1360 }
1361 
1362 static int
1363 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1364 {
1365 	struct ifbreq *req = arg;
1366 	struct bridge_iflist *bif;
1367 
1368 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1369 	if (bif == NULL)
1370 		return (ENOENT);
1371 
1372 	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1373 }
1374 
1375 static int
1376 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1377 {
1378 	struct ifbreq *req = arg;
1379 	struct bridge_iflist *bif;
1380 
1381 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1382 	if (bif == NULL)
1383 		return (ENOENT);
1384 
1385 	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1386 }
1387 
1388 static int
1389 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1390 {
1391 	struct ifbreq *req = arg;
1392 	struct bridge_iflist *bif;
1393 
1394 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1395 	if (bif == NULL)
1396 		return (ENOENT);
1397 
1398 	bif->bif_addrmax = req->ifbr_addrmax;
1399 	return (0);
1400 }
1401 
1402 static int
1403 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1404 {
1405 	struct ifbreq *req = arg;
1406 	struct bridge_iflist *bif = NULL;
1407 	struct ifnet *ifs;
1408 
1409 	ifs = ifunit(req->ifbr_ifsname);
1410 	if (ifs == NULL)
1411 		return (ENOENT);
1412 
1413 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1414 		if (ifs == bif->bif_ifp)
1415 			return (EBUSY);
1416 
1417 	if (ifs->if_bridge != NULL)
1418 		return (EBUSY);
1419 
1420 	switch (ifs->if_type) {
1421 		case IFT_ETHER:
1422 		case IFT_GIF:
1423 		case IFT_L2VLAN:
1424 			break;
1425 		default:
1426 			return (EINVAL);
1427 	}
1428 
1429 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1430 	if (bif == NULL)
1431 		return (ENOMEM);
1432 
1433 	bif->bif_ifp = ifs;
1434 	bif->bif_flags = IFBIF_SPAN;
1435 
1436 	LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1437 
1438 	return (0);
1439 }
1440 
1441 static int
1442 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1443 {
1444 	struct ifbreq *req = arg;
1445 	struct bridge_iflist *bif;
1446 	struct ifnet *ifs;
1447 
1448 	ifs = ifunit(req->ifbr_ifsname);
1449 	if (ifs == NULL)
1450 		return (ENOENT);
1451 
1452 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1453 		if (ifs == bif->bif_ifp)
1454 			break;
1455 
1456 	if (bif == NULL)
1457 		return (ENOENT);
1458 
1459 	bridge_delete_span(sc, bif);
1460 
1461 	return (0);
1462 }
1463 
1464 static int
1465 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1466 {
1467 	struct ifbropreq *req = arg;
1468 	struct bstp_state *bs = &sc->sc_stp;
1469 	struct bstp_port *root_port;
1470 
1471 	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1472 	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1473 	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1474 
1475 	root_port = bs->bs_root_port;
1476 	if (root_port == NULL)
1477 		req->ifbop_root_port = 0;
1478 	else
1479 		req->ifbop_root_port = root_port->bp_ifp->if_index;
1480 
1481 	req->ifbop_holdcount = bs->bs_txholdcount;
1482 	req->ifbop_priority = bs->bs_bridge_priority;
1483 	req->ifbop_protocol = bs->bs_protover;
1484 	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1485 	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1486 	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1487 	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1488 	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1489 	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1490 
1491 	return (0);
1492 }
1493 
1494 static int
1495 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1496 {
1497 	struct ifbrparam *param = arg;
1498 
1499 	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1500 	return (0);
1501 }
1502 
1503 static int
1504 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1505 {
1506 	struct ifbpstpconf *bifstp = arg;
1507 	struct bridge_iflist *bif;
1508 	struct bstp_port *bp;
1509 	struct ifbpstpreq bpreq;
1510 	char *buf, *outbuf;
1511 	int count, buflen, len, error = 0;
1512 
1513 	count = 0;
1514 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1515 		if ((bif->bif_flags & IFBIF_STP) != 0)
1516 			count++;
1517 	}
1518 
1519 	buflen = sizeof(bpreq) * count;
1520 	if (bifstp->ifbpstp_len == 0) {
1521 		bifstp->ifbpstp_len = buflen;
1522 		return (0);
1523 	}
1524 
1525 	BRIDGE_UNLOCK(sc);
1526 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1527 	BRIDGE_LOCK(sc);
1528 
1529 	count = 0;
1530 	buf = outbuf;
1531 	len = min(bifstp->ifbpstp_len, buflen);
1532 	bzero(&bpreq, sizeof(bpreq));
1533 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1534 		if (len < sizeof(bpreq))
1535 			break;
1536 
1537 		if ((bif->bif_flags & IFBIF_STP) == 0)
1538 			continue;
1539 
1540 		bp = &bif->bif_stp;
1541 		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1542 		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1543 		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1544 		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1545 		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1546 		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1547 
1548 		memcpy(buf, &bpreq, sizeof(bpreq));
1549 		count++;
1550 		buf += sizeof(bpreq);
1551 		len -= sizeof(bpreq);
1552 	}
1553 
1554 	BRIDGE_UNLOCK(sc);
1555 	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1556 	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1557 	BRIDGE_LOCK(sc);
1558 	free(outbuf, M_TEMP);
1559 	return (error);
1560 }
1561 
1562 static int
1563 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
1564 {
1565 	struct ifbrparam *param = arg;
1566 
1567 	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
1568 }
1569 
1570 static int
1571 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
1572 {
1573 	struct ifbrparam *param = arg;
1574 
1575 	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
1576 }
1577 
1578 /*
1579  * bridge_ifdetach:
1580  *
1581  *	Detach an interface from a bridge.  Called when a member
1582  *	interface is detaching.
1583  */
1584 static void
1585 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1586 {
1587 	struct bridge_softc *sc = ifp->if_bridge;
1588 	struct bridge_iflist *bif;
1589 
1590 	/* Check if the interface is a bridge member */
1591 	if (sc != NULL) {
1592 		BRIDGE_LOCK(sc);
1593 
1594 		bif = bridge_lookup_member_if(sc, ifp);
1595 		if (bif != NULL)
1596 			bridge_delete_member(sc, bif, 1);
1597 
1598 		BRIDGE_UNLOCK(sc);
1599 		return;
1600 	}
1601 
1602 	/* Check if the interface is a span port */
1603 	mtx_lock(&bridge_list_mtx);
1604 	LIST_FOREACH(sc, &bridge_list, sc_list) {
1605 		BRIDGE_LOCK(sc);
1606 		LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1607 			if (ifp == bif->bif_ifp) {
1608 				bridge_delete_span(sc, bif);
1609 				break;
1610 			}
1611 
1612 		BRIDGE_UNLOCK(sc);
1613 	}
1614 	mtx_unlock(&bridge_list_mtx);
1615 }
1616 
1617 /*
1618  * bridge_init:
1619  *
1620  *	Initialize a bridge interface.
1621  */
1622 static void
1623 bridge_init(void *xsc)
1624 {
1625 	struct bridge_softc *sc = (struct bridge_softc *)xsc;
1626 	struct ifnet *ifp = sc->sc_ifp;
1627 
1628 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1629 		return;
1630 
1631 	BRIDGE_LOCK(sc);
1632 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1633 	    bridge_timer, sc);
1634 
1635 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1636 	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
1637 
1638 	BRIDGE_UNLOCK(sc);
1639 }
1640 
1641 /*
1642  * bridge_stop:
1643  *
1644  *	Stop the bridge interface.
1645  */
1646 static void
1647 bridge_stop(struct ifnet *ifp, int disable)
1648 {
1649 	struct bridge_softc *sc = ifp->if_softc;
1650 
1651 	BRIDGE_LOCK_ASSERT(sc);
1652 
1653 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1654 		return;
1655 
1656 	callout_stop(&sc->sc_brcallout);
1657 	bstp_stop(&sc->sc_stp);
1658 
1659 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1660 
1661 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1662 }
1663 
1664 /*
1665  * bridge_enqueue:
1666  *
1667  *	Enqueue a packet on a bridge member interface.
1668  *
1669  */
1670 static void
1671 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
1672 {
1673 	int len, err = 0;
1674 	short mflags;
1675 	struct mbuf *m0;
1676 
1677 	len = m->m_pkthdr.len;
1678 	mflags = m->m_flags;
1679 
1680 	/* We may be sending a fragment so traverse the mbuf */
1681 	for (; m; m = m0) {
1682 		m0 = m->m_nextpkt;
1683 		m->m_nextpkt = NULL;
1684 
1685 		/*
1686 		 * If the underlying interface cannot do VLAN tag insertion
1687 		 * itself, then attach a packet tag that holds it.
1688 		 */
1689 		if ((m->m_flags & M_VLANTAG) &&
1690 		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
1691 			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
1692 			if (m == NULL) {
1693 				if_printf(dst_ifp,
1694 				    "unable to prepend VLAN header\n");
1695 				dst_ifp->if_oerrors++;
1696 				continue;
1697 			}
1698 			m->m_flags &= ~M_VLANTAG;
1699 		}
1700 
1701 		if (err == 0)
1702 			IFQ_ENQUEUE(&dst_ifp->if_snd, m, err);
1703 	}
1704 
1705 	if (err == 0) {
1706 
1707 		sc->sc_ifp->if_opackets++;
1708 		sc->sc_ifp->if_obytes += len;
1709 
1710 		dst_ifp->if_obytes += len;
1711 
1712 		if (mflags & M_MCAST) {
1713 			sc->sc_ifp->if_omcasts++;
1714 			dst_ifp->if_omcasts++;
1715 		}
1716 	}
1717 
1718 	if ((dst_ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
1719 		(*dst_ifp->if_start)(dst_ifp);
1720 }
1721 
1722 /*
1723  * bridge_dummynet:
1724  *
1725  * 	Receive a queued packet from dummynet and pass it on to the output
1726  * 	interface.
1727  *
1728  *	The mbuf has the Ethernet header already attached.
1729  */
1730 static void
1731 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
1732 {
1733 	struct bridge_softc *sc;
1734 
1735 	sc = ifp->if_bridge;
1736 
1737 	/*
1738 	 * The packet didn't originate from a member interface. This should only
1739 	 * ever happen if a member interface is removed while packets are
1740 	 * queued for it.
1741 	 */
1742 	if (sc == NULL) {
1743 		m_freem(m);
1744 		return;
1745 	}
1746 
1747 	if (PFIL_HOOKED(&inet_pfil_hook)
1748 #ifdef INET6
1749 	    || PFIL_HOOKED(&inet6_pfil_hook)
1750 #endif
1751 	    ) {
1752 		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
1753 			return;
1754 		if (m == NULL)
1755 			return;
1756 	}
1757 
1758 	bridge_enqueue(sc, ifp, m);
1759 }
1760 
1761 /*
1762  * bridge_output:
1763  *
1764  *	Send output from a bridge member interface.  This
1765  *	performs the bridging function for locally originated
1766  *	packets.
1767  *
1768  *	The mbuf has the Ethernet header already attached.  We must
1769  *	enqueue or free the mbuf before returning.
1770  */
1771 static int
1772 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
1773     struct rtentry *rt)
1774 {
1775 	struct ether_header *eh;
1776 	struct ifnet *dst_if;
1777 	struct bridge_softc *sc;
1778 	uint16_t vlan;
1779 
1780 	if (m->m_len < ETHER_HDR_LEN) {
1781 		m = m_pullup(m, ETHER_HDR_LEN);
1782 		if (m == NULL)
1783 			return (0);
1784 	}
1785 
1786 	eh = mtod(m, struct ether_header *);
1787 	sc = ifp->if_bridge;
1788 	vlan = VLANTAGOF(m);
1789 
1790 	BRIDGE_LOCK(sc);
1791 
1792 	/*
1793 	 * If bridge is down, but the original output interface is up,
1794 	 * go ahead and send out that interface.  Otherwise, the packet
1795 	 * is dropped below.
1796 	 */
1797 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1798 		dst_if = ifp;
1799 		goto sendunicast;
1800 	}
1801 
1802 	/*
1803 	 * If the packet is a multicast, or we don't know a better way to
1804 	 * get there, send to all interfaces.
1805 	 */
1806 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
1807 		dst_if = NULL;
1808 	else
1809 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
1810 	if (dst_if == NULL) {
1811 		struct bridge_iflist *bif;
1812 		struct mbuf *mc;
1813 		int error = 0, used = 0;
1814 
1815 		bridge_span(sc, m);
1816 
1817 		BRIDGE_LOCK2REF(sc, error);
1818 		if (error) {
1819 			m_freem(m);
1820 			return (0);
1821 		}
1822 
1823 		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1824 			dst_if = bif->bif_ifp;
1825 
1826 			if (dst_if->if_type == IFT_GIF)
1827 				continue;
1828 			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
1829 				continue;
1830 
1831 			/*
1832 			 * If this is not the original output interface,
1833 			 * and the interface is participating in spanning
1834 			 * tree, make sure the port is in a state that
1835 			 * allows forwarding.
1836 			 */
1837 			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
1838 			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
1839 				continue;
1840 
1841 			if (LIST_NEXT(bif, bif_next) == NULL) {
1842 				used = 1;
1843 				mc = m;
1844 			} else {
1845 				mc = m_copypacket(m, M_DONTWAIT);
1846 				if (mc == NULL) {
1847 					sc->sc_ifp->if_oerrors++;
1848 					continue;
1849 				}
1850 			}
1851 
1852 			bridge_enqueue(sc, dst_if, mc);
1853 		}
1854 		if (used == 0)
1855 			m_freem(m);
1856 		BRIDGE_UNREF(sc);
1857 		return (0);
1858 	}
1859 
1860 sendunicast:
1861 	/*
1862 	 * XXX Spanning tree consideration here?
1863 	 */
1864 
1865 	bridge_span(sc, m);
1866 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1867 		m_freem(m);
1868 		BRIDGE_UNLOCK(sc);
1869 		return (0);
1870 	}
1871 
1872 	BRIDGE_UNLOCK(sc);
1873 	bridge_enqueue(sc, dst_if, m);
1874 	return (0);
1875 }
1876 
1877 /*
1878  * bridge_start:
1879  *
1880  *	Start output on a bridge.
1881  *
1882  */
1883 static void
1884 bridge_start(struct ifnet *ifp)
1885 {
1886 	struct bridge_softc *sc;
1887 	struct mbuf *m;
1888 	struct ether_header *eh;
1889 	struct ifnet *dst_if;
1890 
1891 	sc = ifp->if_softc;
1892 
1893 	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1894 	for (;;) {
1895 		IFQ_DEQUEUE(&ifp->if_snd, m);
1896 		if (m == NULL)
1897 			break;
1898 		ETHER_BPF_MTAP(ifp, m);
1899 
1900 		eh = mtod(m, struct ether_header *);
1901 		dst_if = NULL;
1902 
1903 		BRIDGE_LOCK(sc);
1904 		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1905 			dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1);
1906 		}
1907 
1908 		if (dst_if == NULL)
1909 			bridge_broadcast(sc, ifp, m, 0);
1910 		else {
1911 			BRIDGE_UNLOCK(sc);
1912 			bridge_enqueue(sc, dst_if, m);
1913 		}
1914 	}
1915 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1916 }
1917 
1918 /*
1919  * bridge_forward:
1920  *
1921  *	The forwarding function of the bridge.
1922  *
1923  *	NOTE: Releases the lock on return.
1924  */
1925 static void
1926 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
1927     struct mbuf *m)
1928 {
1929 	struct bridge_iflist *dbif;
1930 	struct ifnet *src_if, *dst_if, *ifp;
1931 	struct ether_header *eh;
1932 	uint16_t vlan;
1933 	int error;
1934 
1935 	src_if = m->m_pkthdr.rcvif;
1936 	ifp = sc->sc_ifp;
1937 
1938 	ifp->if_ipackets++;
1939 	ifp->if_ibytes += m->m_pkthdr.len;
1940 	vlan = VLANTAGOF(m);
1941 
1942 	if ((sbif->bif_flags & IFBIF_STP) &&
1943 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
1944 		goto drop;
1945 
1946 	eh = mtod(m, struct ether_header *);
1947 
1948 	/* If the interface is learning, record the address. */
1949 	if (sbif->bif_flags & IFBIF_LEARNING) {
1950 		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
1951 		    sbif, 0, IFBAF_DYNAMIC);
1952 		/*
1953 		 * If the interface has address limits then deny any source
1954 		 * that is not in the cache.
1955 		 */
1956 		if (error && sbif->bif_addrmax)
1957 			goto drop;
1958 	}
1959 
1960 	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
1961 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
1962 		goto drop;
1963 
1964 	/*
1965 	 * At this point, the port either doesn't participate
1966 	 * in spanning tree or it is in the forwarding state.
1967 	 */
1968 
1969 	/*
1970 	 * If the packet is unicast, destined for someone on
1971 	 * "this" side of the bridge, drop it.
1972 	 */
1973 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1974 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
1975 		if (src_if == dst_if)
1976 			goto drop;
1977 	} else {
1978 		/* ...forward it to all interfaces. */
1979 		ifp->if_imcasts++;
1980 		dst_if = NULL;
1981 	}
1982 
1983 	/*
1984 	 * If we have a destination interface which is a member of our bridge,
1985 	 * OR this is a unicast packet, push it through the bpf(4) machinery.
1986 	 * For broadcast or multicast packets, don't bother because it will
1987 	 * For broadcast or multicast packets, don't bother because they will
1988 	 * through the pfil(9) framework, as it is possible that pfil(9) will
1989 	 * drop the packet, or possibly modify it, making it difficult to debug
1990 	 * firewall issues on the bridge.
1991 	 */
1992 	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
1993 		ETHER_BPF_MTAP(ifp, m);
1994 
1995 	/* run the packet filter */
1996 	if (PFIL_HOOKED(&inet_pfil_hook)
1997 #ifdef INET6
1998 	    || PFIL_HOOKED(&inet6_pfil_hook)
1999 #endif
2000 	    ) {
2001 		BRIDGE_UNLOCK(sc);
2002 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2003 			return;
2004 		if (m == NULL)
2005 			return;
2006 		BRIDGE_LOCK(sc);
2007 	}
2008 
2009 	if (dst_if == NULL) {
2010 		bridge_broadcast(sc, src_if, m, 1);
2011 		return;
2012 	}
2013 
2014 	/*
2015 	 * At this point, we're dealing with a unicast frame
2016 	 * going to a different interface.
2017 	 */
2018 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2019 		goto drop;
2020 
2021 	dbif = bridge_lookup_member_if(sc, dst_if);
2022 	if (dbif == NULL)
2023 		/* Not a member of the bridge (anymore?) */
2024 		goto drop;
2025 
2026 	/* Private segments can not talk to each other */
2027 	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2028 		goto drop;
2029 
2030 	if ((dbif->bif_flags & IFBIF_STP) &&
2031 	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2032 		goto drop;
2033 
2034 	BRIDGE_UNLOCK(sc);
2035 
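	/* Run the outbound packet filter on the destination interface. */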
2036 	if (PFIL_HOOKED(&inet_pfil_hook)
2037 #ifdef INET6
2038 	    || PFIL_HOOKED(&inet6_pfil_hook)
2039 #endif
2040 	    ) {
2041 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2042 			return;
2043 		if (m == NULL)
2044 			return;
2045 	}
2046 
2047 	bridge_enqueue(sc, dst_if, m);
2048 	return;
2049 
2050 drop:
2051 	BRIDGE_UNLOCK(sc);
2052 	m_freem(m);
2053 }
2054 
2055 /*
2056  * bridge_input:
2057  *
2058  *	Receive input from a member interface.  Queue the packet for
2059  *	bridging if it is not for us.
2060  */
2061 static struct mbuf *
2062 bridge_input(struct ifnet *ifp, struct mbuf *m)
2063 {
2064 	struct bridge_softc *sc = ifp->if_bridge;
2065 	struct bridge_iflist *bif, *bif2;
2066 	struct ifnet *bifp;
2067 	struct ether_header *eh;
2068 	struct mbuf *mc, *mc2;
2069 	uint16_t vlan;
2070 	int error;
2071 
2072 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2073 		return (m);
2074 
2075 	bifp = sc->sc_ifp;
2076 	vlan = VLANTAGOF(m);
2077 
2078 	/*
2079 	 * Implement support for bridge monitoring. If this flag has been
2080 	 * set on this interface, discard the packet once we push it through
2081 	 * the bpf(4) machinery, but before we do, increment the byte and
2082 	 * packet counters associated with this interface.
2083 	 */
2084 	if ((bifp->if_flags & IFF_MONITOR) != 0) {
2085 		m->m_pkthdr.rcvif  = bifp;
2086 		ETHER_BPF_MTAP(bifp, m);
2087 		bifp->if_ipackets++;
2088 		bifp->if_ibytes += m->m_pkthdr.len;
2089 		m_freem(m);
2090 		return (NULL);
2091 	}
2092 	BRIDGE_LOCK(sc);
2093 	bif = bridge_lookup_member_if(sc, ifp);
2094 	if (bif == NULL) {
2095 		BRIDGE_UNLOCK(sc);
2096 		return (m);
2097 	}
2098 
2099 	eh = mtod(m, struct ether_header *);
2100 
2101 	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp),
2102 	    ETHER_ADDR_LEN) == 0) {
2103 		/* Block redundant paths to us */
2104 		if ((bif->bif_flags & IFBIF_STP) &&
2105 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2106 			BRIDGE_UNLOCK(sc);
2107 			return (m);
2108 		}
2109 
2110 		/*
2111 		 * Filter on the physical interface.
2112 		 */
2113 		if (pfil_local_phys && (PFIL_HOOKED(&inet_pfil_hook)
2114 #ifdef INET6
2115 		    || PFIL_HOOKED(&inet6_pfil_hook)
2116 #endif
2117 		    )) {
2118 			if (bridge_pfil(&m, NULL, ifp, PFIL_IN) != 0 ||
2119 			    m == NULL) {
2120 				BRIDGE_UNLOCK(sc);
2121 				return (NULL);
2122 			}
2123 		}
2124 
2125 		/*
2126 		 * If the packet is for us, set the packet's source as the
2127 		 * bridge, and return the packet back to ether_input for
2128 		 * local processing.
2129 		 */
2130 
2131 		/* Note where to send the reply to */
2132 		if (bif->bif_flags & IFBIF_LEARNING) {
2133 			error = bridge_rtupdate(sc,
2134 			    eh->ether_shost, vlan, bif, 0, IFBAF_DYNAMIC);
2135 			/*
2136 			 * If the interface has address limits then deny any
2137 			 * source that is not in the cache.
2138 			 */
2139 			if (error && bif->bif_addrmax) {
2140 				BRIDGE_UNLOCK(sc);
2141 				m_freem(m);
2142 				return (NULL);
2143 			}
2144 		}
2145 
2146 		/* Mark the packet as arriving on the bridge interface */
2147 		m->m_pkthdr.rcvif = bifp;
2148 		ETHER_BPF_MTAP(bifp, m);
2149 		bifp->if_ipackets++;
2150 
2151 		BRIDGE_UNLOCK(sc);
2152 		return (m);
2153 	}
2154 
2155 	bridge_span(sc, m);
2156 
2157 	if (m->m_flags & (M_BCAST|M_MCAST)) {
2158 		/* Tap off 802.1D packets; they do not get forwarded. */
2159 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2160 		    ETHER_ADDR_LEN) == 0) {
2161 			m = bstp_input(&bif->bif_stp, ifp, m);
2162 			if (m == NULL) {
2163 				BRIDGE_UNLOCK(sc);
2164 				return (NULL);
2165 			}
2166 		}
2167 
2168 		if ((bif->bif_flags & IFBIF_STP) &&
2169 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2170 			BRIDGE_UNLOCK(sc);
2171 			return (m);
2172 		}
2173 
2174 		/*
2175 		 * Make a deep copy of the packet and enqueue the copy
2176 		 * for bridge processing; return the original packet for
2177 		 * local processing.
2178 		 */
2179 		mc = m_dup(m, M_DONTWAIT);
2180 		if (mc == NULL) {
2181 			BRIDGE_UNLOCK(sc);
2182 			return (m);
2183 		}
2184 
2185 		/* Perform the bridge forwarding function with the copy. */
2186 		bridge_forward(sc, bif, mc);
2187 
2188 		/*
2189 		 * Reinject the mbuf as arriving on the bridge so we have a
2190 		 * chance at claiming multicast packets. We can not loop back
2191 		 * here from ether_input as a bridge is never a member of a
2192 		 * bridge.
2193 		 */
2194 		KASSERT(bifp->if_bridge == NULL,
2195 		    ("loop created in bridge_input"));
2196 		mc2 = m_dup(m, M_DONTWAIT);
2197 		if (mc2 != NULL) {
2198 			/* Keep the layer3 header aligned */
2199 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2200 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2201 		}
2202 		if (mc2 != NULL) {
2203 			mc2->m_pkthdr.rcvif = bifp;
2204 			(*bifp->if_input)(bifp, mc2);
2205 		}
2206 
2207 		/* Return the original packet for local processing. */
2208 		return (m);
2209 	}
2210 
2211 	if ((bif->bif_flags & IFBIF_STP) &&
2212 	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2213 		BRIDGE_UNLOCK(sc);
2214 		return (m);
2215 	}
2216 
2217 #ifdef DEV_CARP
2218 #   define OR_CARP_CHECK_WE_ARE_DST(iface) \
2219 	|| ((iface)->if_carp \
2220 	    && carp_forus((iface)->if_carp, eh->ether_dhost))
2221 #   define OR_CARP_CHECK_WE_ARE_SRC(iface) \
2222 	|| ((iface)->if_carp \
2223 	    && carp_forus((iface)->if_carp, eh->ether_shost))
2224 #else
2225 #   define OR_CARP_CHECK_WE_ARE_DST(iface)
2226 #   define OR_CARP_CHECK_WE_ARE_SRC(iface)
2227 #endif
2228 
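	/*
	 * GRAB_OUR_PACKETS() checks one member interface: if the frame is
	 * addressed to that member it is claimed for local input, if it is
	 * a frame we transmitted ourselves it is discarded, and otherwise
	 * control falls through so the next member can be examined.
	 */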
2229 #define GRAB_OUR_PACKETS(iface) \
2230 	if ((iface)->if_type == IFT_GIF) \
2231 		continue; \
2232 	/* It is destined for us. */ \
2233 	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost,  ETHER_ADDR_LEN) == 0 \
2234 	    OR_CARP_CHECK_WE_ARE_DST((iface))				\
2235 	    ) {								\
2236 		if (bif->bif_flags & IFBIF_LEARNING) {			\
2237 			error = bridge_rtupdate(sc, eh->ether_shost,	\
2238 			    vlan, bif, 0, IFBAF_DYNAMIC);		\
2239 			if (error && bif->bif_addrmax) {		\
2240 				BRIDGE_UNLOCK(sc);			\
2241 				m_freem(m);				\
2242 				return (NULL);				\
2243 			}						\
2244 		}							\
2245 		m->m_pkthdr.rcvif = iface;				\
2246 		BRIDGE_UNLOCK(sc);					\
2247 		return (m);						\
2248 	}								\
2249 									\
2250 	/* We just received a packet that we sent out. */		\
2251 	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
2252 	    OR_CARP_CHECK_WE_ARE_SRC((iface))			\
2253 	    ) {								\
2254 		BRIDGE_UNLOCK(sc);					\
2255 		m_freem(m);						\
2256 		return (NULL);						\
2257 	}
2258 
2259 	/*
2260 	 * Unicast.  Make sure it's not for us.
2261 	 *
2262 	 * Give ifp first priority.  This helps when the packet arrives
2263 	 * through an interface such as a vlan(4) that shares a MAC address
2264 	 * with other members of the same bridge, and it also saves some
2265 	 * CPU cycles when the destination interface is the same as the
2266 	 * input interface (ifp).
2267 	 */
2268 	do { GRAB_OUR_PACKETS(ifp) } while (0);
2269 
2270 	/* Now check the all bridge members. */
2271 	LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2272 		GRAB_OUR_PACKETS(bif2->bif_ifp)
2273 	}
2274 
2275 #undef OR_CARP_CHECK_WE_ARE_DST
2276 #undef OR_CARP_CHECK_WE_ARE_SRC
2277 #undef GRAB_OUR_PACKETS
2278 
2279 	/* Perform the bridge forwarding function. */
2280 	bridge_forward(sc, bif, m);
2281 
2282 	return (NULL);
2283 }
2284 
2285 /*
2286  * bridge_broadcast:
2287  *
2288  *	Send a frame to all interfaces that are members of
2289  *	the bridge, except for the one on which the packet
2290  *	arrived.
2291  *
2292  *	NOTE: Releases the lock on return.
2293  */
2294 static void
2295 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2296     struct mbuf *m, int runfilt)
2297 {
2298 	struct bridge_iflist *dbif, *sbif;
2299 	struct mbuf *mc;
2300 	struct ifnet *dst_if;
2301 	int error = 0, used = 0, i;
2302 
2303 	sbif = bridge_lookup_member_if(sc, src_if);
2304 
2305 	BRIDGE_LOCK2REF(sc, error);
2306 	if (error) {
2307 		m_freem(m);
2308 		return;
2309 	}
2310 
2311 	/* Filter on the bridge interface before broadcasting */
2312 	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
2313 #ifdef INET6
2314 	    || PFIL_HOOKED(&inet6_pfil_hook)
2315 #endif
2316 	    )) {
2317 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2318 			goto out;
2319 		if (m == NULL)
2320 			goto out;
2321 	}
2322 
2323 	LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2324 		dst_if = dbif->bif_ifp;
2325 		if (dst_if == src_if)
2326 			continue;
2327 
2328 		/* Private segments can not talk to each other */
2329 		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2330 			continue;
2331 
2332 		if ((dbif->bif_flags & IFBIF_STP) &&
2333 		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2334 			continue;
2335 
2336 		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2337 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2338 			continue;
2339 
2340 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2341 			continue;
2342 
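		/*
		 * Send the original mbuf to the last member in the list;
		 * every other member gets a duplicate.
		 */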
2343 		if (LIST_NEXT(dbif, bif_next) == NULL) {
2344 			mc = m;
2345 			used = 1;
2346 		} else {
2347 			mc = m_dup(m, M_DONTWAIT);
2348 			if (mc == NULL) {
2349 				sc->sc_ifp->if_oerrors++;
2350 				continue;
2351 			}
2352 		}
2353 
2354 		/*
2355 		 * Filter on the output interface. Pass a NULL bridge interface
2356 		 * pointer so we do not redundantly filter on the bridge for
2357 		 * each interface we broadcast on.
2358 		 */
2359 		if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
2360 #ifdef INET6
2361 		    || PFIL_HOOKED(&inet6_pfil_hook)
2362 #endif
2363 		    )) {
2364 			if (used == 0) {
2365 				/* Keep the layer3 header aligned */
2366 				i = min(mc->m_pkthdr.len, max_protohdr);
2367 				mc = m_copyup(mc, i, ETHER_ALIGN);
2368 				if (mc == NULL) {
2369 					sc->sc_ifp->if_oerrors++;
2370 					continue;
2371 				}
2372 			}
2373 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2374 				continue;
2375 			if (mc == NULL)
2376 				continue;
2377 		}
2378 
2379 		bridge_enqueue(sc, dst_if, mc);
2380 	}
2381 	if (used == 0)
2382 		m_freem(m);
2383 
2384 out:
2385 	BRIDGE_UNREF(sc);
2386 }
2387 
2388 /*
2389  * bridge_span:
2390  *
2391  *	Duplicate a packet out one or more interfaces that are in span mode;
2392  *	the original mbuf is left unmodified.
2393  */
2394 static void
2395 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2396 {
2397 	struct bridge_iflist *bif;
2398 	struct ifnet *dst_if;
2399 	struct mbuf *mc;
2400 
2401 	if (LIST_EMPTY(&sc->sc_spanlist))
2402 		return;
2403 
2404 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2405 		dst_if = bif->bif_ifp;
2406 
2407 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2408 			continue;
2409 
2410 		mc = m_copypacket(m, M_DONTWAIT);
2411 		if (mc == NULL) {
2412 			sc->sc_ifp->if_oerrors++;
2413 			continue;
2414 		}
2415 
2416 		bridge_enqueue(sc, dst_if, mc);
2417 	}
2418 }
2419 
2420 /*
2421  * bridge_rtupdate:
2422  *
2423  *	Add a bridge routing entry.
2424  */
2425 static int
2426 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
2427     struct bridge_iflist *bif, int setflags, uint8_t flags)
2428 {
2429 	struct bridge_rtnode *brt;
2430 	int error;
2431 
2432 	BRIDGE_LOCK_ASSERT(sc);
2433 
2434 	/* Check that the source address is valid and not multicast. */
2435 	if (ETHER_IS_MULTICAST(dst) ||
2436 	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
2437 	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
2438 		return (EINVAL);
2439 
2440 	/* 802.1p frames map to vlan 1 */
2441 	if (vlan == 0)
2442 		vlan = 1;
2443 
2444 	/*
2445 	 * A route for this destination might already exist.  If so,
2446 	 * update it, otherwise create a new one.
2447 	 */
2448 	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2449 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2450 			sc->sc_brtexceeded++;
2451 			return (ENOSPC);
2452 		}
2453 		/* Check per interface address limits (if enabled) */
2454 		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2455 			bif->bif_addrexceeded++;
2456 			return (ENOSPC);
2457 		}
2458 
2459 		/*
2460 		 * Allocate a new bridge forwarding node, and
2461 		 * initialize the expiration time and Ethernet
2462 		 * address.
2463 		 */
2464 		brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2465 		if (brt == NULL)
2466 			return (ENOMEM);
2467 
2468 		if (bif->bif_flags & IFBIF_STICKY)
2469 			brt->brt_flags = IFBAF_STICKY;
2470 		else
2471 			brt->brt_flags = IFBAF_DYNAMIC;
2472 
2473 		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2474 		brt->brt_vlan = vlan;
2475 
2476 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2477 			uma_zfree(bridge_rtnode_zone, brt);
2478 			return (error);
2479 		}
2480 		brt->brt_dst = bif;
2481 		bif->bif_addrcnt++;
2482 	}
2483 
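	/*
	 * If a dynamic entry was learned on a different member port the
	 * station has moved; repoint the entry and fix up the per-port
	 * address counts.
	 */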
2484 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2485 	    brt->brt_dst != bif) {
2486 		brt->brt_dst->bif_addrcnt--;
2487 		brt->brt_dst = bif;
2488 		brt->brt_dst->bif_addrcnt++;
2489 	}
2490 
2491 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2492 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
2493 	if (setflags)
2494 		brt->brt_flags = flags;
2495 
2496 	return (0);
2497 }
2498 
2499 /*
2500  * bridge_rtlookup:
2501  *
2502  *	Lookup the destination interface for an address.
2503  */
2504 static struct ifnet *
2505 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2506 {
2507 	struct bridge_rtnode *brt;
2508 
2509 	BRIDGE_LOCK_ASSERT(sc);
2510 
2511 	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
2512 		return (NULL);
2513 
2514 	return (brt->brt_ifp);
2515 }
2516 
2517 /*
2518  * bridge_rttrim:
2519  *
2520  *	Trim the routing table so that we have a number
2521  *	of routing entries less than or equal to the
2522  *	maximum number.
2523  */
2524 static void
2525 bridge_rttrim(struct bridge_softc *sc)
2526 {
2527 	struct bridge_rtnode *brt, *nbrt;
2528 
2529 	BRIDGE_LOCK_ASSERT(sc);
2530 
2531 	/* Make sure we actually need to do this. */
2532 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2533 		return;
2534 
2535 	/* Force an aging cycle; this might trim enough addresses. */
2536 	bridge_rtage(sc);
2537 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2538 		return;
2539 
2540 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2541 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2542 			bridge_rtnode_destroy(sc, brt);
2543 			if (sc->sc_brtcnt <= sc->sc_brtmax)
2544 				return;
2545 		}
2546 	}
2547 }
2548 
2549 /*
2550  * bridge_timer:
2551  *
2552  *	Aging timer for the bridge.
2553  */
2554 static void
2555 bridge_timer(void *arg)
2556 {
2557 	struct bridge_softc *sc = arg;
2558 
2559 	BRIDGE_LOCK_ASSERT(sc);
2560 
2561 	bridge_rtage(sc);
2562 
2563 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
2564 		callout_reset(&sc->sc_brcallout,
2565 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2566 }
2567 
2568 /*
2569  * bridge_rtage:
2570  *
2571  *	Perform an aging cycle.
2572  */
2573 static void
2574 bridge_rtage(struct bridge_softc *sc)
2575 {
2576 	struct bridge_rtnode *brt, *nbrt;
2577 
2578 	BRIDGE_LOCK_ASSERT(sc);
2579 
2580 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2581 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2582 			if (time_uptime >= brt->brt_expire)
2583 				bridge_rtnode_destroy(sc, brt);
2584 		}
2585 	}
2586 }
2587 
2588 /*
2589  * bridge_rtflush:
2590  *
2591  *	Remove all dynamic addresses from the bridge.
2592  */
2593 static void
2594 bridge_rtflush(struct bridge_softc *sc, int full)
2595 {
2596 	struct bridge_rtnode *brt, *nbrt;
2597 
2598 	BRIDGE_LOCK_ASSERT(sc);
2599 
2600 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2601 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2602 			bridge_rtnode_destroy(sc, brt);
2603 	}
2604 }
2605 
2606 /*
2607  * bridge_rtdaddr:
2608  *
2609  *	Remove an address from the table.
2610  */
2611 static int
2612 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2613 {
2614 	struct bridge_rtnode *brt;
2615 	int found = 0;
2616 
2617 	BRIDGE_LOCK_ASSERT(sc);
2618 
2619 	/*
2620 	 * If vlan is zero then we want to delete for all vlans so the lookup
2621 	 * may return more than one.
2622 	 */
2623 	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
2624 		bridge_rtnode_destroy(sc, brt);
2625 		found = 1;
2626 	}
2627 
2628 	return (found ? 0 : ENOENT);
2629 }
2630 
2631 /*
2632  * bridge_rtdelete:
2633  *
2634  *	Delete routes to a specific member interface.
2635  */
2636 static void
2637 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2638 {
2639 	struct bridge_rtnode *brt, *nbrt;
2640 
2641 	BRIDGE_LOCK_ASSERT(sc);
2642 
2643 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2644 		if (brt->brt_ifp == ifp && (full ||
2645 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2646 			bridge_rtnode_destroy(sc, brt);
2647 	}
2648 }
2649 
2650 /*
2651  * bridge_rtable_init:
2652  *
2653  *	Initialize the route table for this bridge.
2654  */
2655 static int
2656 bridge_rtable_init(struct bridge_softc *sc)
2657 {
2658 	int i;
2659 
2660 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2661 	    M_DEVBUF, M_NOWAIT);
2662 	if (sc->sc_rthash == NULL)
2663 		return (ENOMEM);
2664 
2665 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2666 		LIST_INIT(&sc->sc_rthash[i]);
2667 
2668 	sc->sc_rthash_key = arc4random();
2669 
2670 	LIST_INIT(&sc->sc_rtlist);
2671 
2672 	return (0);
2673 }
2674 
2675 /*
2676  * bridge_rtable_fini:
2677  *
2678  *	Deconstruct the route table for this bridge.
2679  */
2680 static void
2681 bridge_rtable_fini(struct bridge_softc *sc)
2682 {
2683 
2684 	KASSERT(sc->sc_brtcnt == 0,
2685 	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
2686 	free(sc->sc_rthash, M_DEVBUF);
2687 }
2688 
2689 /*
2690  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2691  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2692  */
2693 #define	mix(a, b, c)							\
2694 do {									\
2695 	a -= b; a -= c; a ^= (c >> 13);					\
2696 	b -= c; b -= a; b ^= (a << 8);					\
2697 	c -= a; c -= b; c ^= (b >> 13);					\
2698 	a -= b; a -= c; a ^= (c >> 12);					\
2699 	b -= c; b -= a; b ^= (a << 16);					\
2700 	c -= a; c -= b; c ^= (b >> 5);					\
2701 	a -= b; a -= c; a ^= (c >> 3);					\
2702 	b -= c; b -= a; b ^= (a << 10);					\
2703 	c -= a; c -= b; c ^= (b >> 15);					\
2704 } while (/*CONSTCOND*/0)
2705 
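/*
 * Hash an Ethernet address into the route table.  The address bytes are
 * mixed with the per-bridge random key (sc_rthash_key) so the resulting
 * bucket distribution cannot be predicted from the address alone.
 */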
2706 static __inline uint32_t
2707 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2708 {
2709 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2710 
2711 	b += addr[5] << 8;
2712 	b += addr[4];
2713 	a += addr[3] << 24;
2714 	a += addr[2] << 16;
2715 	a += addr[1] << 8;
2716 	a += addr[0];
2717 
2718 	mix(a, b, c);
2719 
2720 	return (c & BRIDGE_RTHASH_MASK);
2721 }
2722 
2723 #undef mix
2724 
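/*
 * Compare two Ethernet addresses byte by byte, returning a result with
 * memcmp(3) semantics.  The hash chains are kept sorted on this ordering
 * (largest address first) so lookups and inserts can terminate early.
 */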
2725 static int
2726 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
2727 {
2728 	int i, d;
2729 
2730 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
2731 		d = ((int)a[i]) - ((int)b[i]);
2732 	}
2733 
2734 	return (d);
2735 }
2736 
2737 /*
2738  * bridge_rtnode_lookup:
2739  *
2740  *	Look up a bridge route node for the specified destination.  Compare
2741  *	the vlan id; if it is zero, return the first match.
2742  */
2743 static struct bridge_rtnode *
2744 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2745 {
2746 	struct bridge_rtnode *brt;
2747 	uint32_t hash;
2748 	int dir;
2749 
2750 	BRIDGE_LOCK_ASSERT(sc);
2751 
2752 	hash = bridge_rthash(sc, addr);
2753 	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2754 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
2755 		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
2756 			return (brt);
2757 		if (dir > 0)
2758 			return (NULL);
2759 	}
2760 
2761 	return (NULL);
2762 }
2763 
2764 /*
2765  * bridge_rtnode_insert:
2766  *
2767  *	Insert the specified bridge node into the route table.  We
2768  *	assume the entry is not already in the table.
2769  */
2770 static int
2771 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2772 {
2773 	struct bridge_rtnode *lbrt;
2774 	uint32_t hash;
2775 	int dir;
2776 
2777 	BRIDGE_LOCK_ASSERT(sc);
2778 
2779 	hash = bridge_rthash(sc, brt->brt_addr);
2780 
2781 	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2782 	if (lbrt == NULL) {
2783 		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2784 		goto out;
2785 	}
2786 
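	/*
	 * Walk the sorted hash chain: return EEXIST if the address/vlan
	 * pair is already present, otherwise insert the new node before
	 * the first entry with a smaller address to keep the ordering.
	 */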
2787 	do {
2788 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
2789 		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
2790 			return (EEXIST);
2791 		if (dir > 0) {
2792 			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2793 			goto out;
2794 		}
2795 		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2796 			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2797 			goto out;
2798 		}
2799 		lbrt = LIST_NEXT(lbrt, brt_hash);
2800 	} while (lbrt != NULL);
2801 
2802 #ifdef DIAGNOSTIC
2803 	panic("bridge_rtnode_insert: impossible");
2804 #endif
2805 
2806 out:
2807 	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2808 	sc->sc_brtcnt++;
2809 
2810 	return (0);
2811 }
2812 
2813 /*
2814  * bridge_rtnode_destroy:
2815  *
2816  *	Destroy a bridge rtnode.
2817  */
2818 static void
2819 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
2820 {
2821 	BRIDGE_LOCK_ASSERT(sc);
2822 
2823 	LIST_REMOVE(brt, brt_hash);
2824 
2825 	LIST_REMOVE(brt, brt_list);
2826 	sc->sc_brtcnt--;
2827 	brt->brt_dst->bif_addrcnt--;
2828 	uma_zfree(bridge_rtnode_zone, brt);
2829 }
2830 
2831 /*
2832  * bridge_rtable_expire:
2833  *
2834  *	Set the expiry time for all routes on an interface.
2835  */
2836 static void
2837 bridge_rtable_expire(struct ifnet *ifp, int age)
2838 {
2839 	struct bridge_softc *sc = ifp->if_bridge;
2840 	struct bridge_rtnode *brt;
2841 
2842 	BRIDGE_LOCK(sc);
2843 
2844 	/*
2845 	 * If the age is zero then flush, otherwise cap the expiry time of
2846 	 * the interface's dynamic entries at 'age' seconds from now.
2847 	 */
2848 	if (age == 0)
2849 		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
2850 	else {
2851 		LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
2852 			/* Cap the expiry time to 'age' */
2853 			if (brt->brt_ifp == ifp &&
2854 			    brt->brt_expire > time_uptime + age &&
2855 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2856 				brt->brt_expire = time_uptime + age;
2857 		}
2858 	}
2859 	BRIDGE_UNLOCK(sc);
2860 }
2861 
2862 /*
2863  * bridge_state_change:
2864  *
2865  *	Callback from the bridgestp code when a port changes states.
2866  */
2867 static void
2868 bridge_state_change(struct ifnet *ifp, int state)
2869 {
2870 	struct bridge_softc *sc = ifp->if_bridge;
2871 	static const char *stpstates[] = {
2872 		"disabled",
2873 		"listening",
2874 		"learning",
2875 		"forwarding",
2876 		"blocking",
2877 		"discarding"
2878 	};
2879 
2880 	if (log_stp)
2881 		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
2882 		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
2883 }
2884 
2885 /*
2886  * Send bridge packets through pfil if they are one of the types pfil can deal
2887  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
2888  * question.) If bifp or ifp is NULL then packet filtering is skipped for
2889  * that interface.
2890  */
2891 static int
2892 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
2893 {
2894 	int snap, error, i, hlen;
2895 	struct ether_header *eh1, eh2;
2896 	struct ip_fw_args args;
2897 	struct ip *ip;
2898 	struct llc llc1;
2899 	u_int16_t ether_type;
2900 
2901 	snap = 0;
2902 	error = -1;	/* Default to an error unless explicitly cleared below */
2903 
2904 #if 0
2905 	/* we may return with the IP fields swapped, ensure its not shared */
2906 	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
2907 #endif
2908 
2909 	if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
2910 		return (0); /* filtering is disabled */
2911 
2912 	i = min((*mp)->m_pkthdr.len, max_protohdr);
2913 	if ((*mp)->m_len < i) {
2914 		*mp = m_pullup(*mp, i);
2915 		if (*mp == NULL) {
2916 			printf("%s: m_pullup failed\n", __func__);
2917 			return (-1);
2918 		}
2919 	}
2920 
2921 	eh1 = mtod(*mp, struct ether_header *);
2922 	ether_type = ntohs(eh1->ether_type);
2923 
2924 	/*
2925 	 * Check for SNAP/LLC.
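	 * An ether_type below ETHERMTU is really an 802.3 length field; if
	 * the frame carries an 802.2 LLC/SNAP header, use the encapsulated
	 * ethertype instead and remember that the SNAP header has to be
	 * stripped and restored around the filtering calls.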
2926 	 */
2927 	if (ether_type < ETHERMTU) {
2928 		struct llc *llc2 = (struct llc *)(eh1 + 1);
2929 
2930 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2931 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
2932 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
2933 		    llc2->llc_control == LLC_UI) {
2934 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
2935 			snap = 1;
2936 		}
2937 	}
2938 
2939 	/*
2940 	 * If we're trying to filter bridge traffic, don't look at anything
2941 	 * other than IP and ARP traffic.  If the filter doesn't understand
2942 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
2943 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2944 	 * but of course we don't have an AppleTalk filter to begin with.
2945 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
2946 	 * ARP traffic.)
2947 	 */
2948 	switch (ether_type) {
2949 		case ETHERTYPE_ARP:
2950 		case ETHERTYPE_REVARP:
2951 			if (pfil_ipfw_arp == 0)
2952 				return (0); /* Automatically pass */
2953 			break;
2954 
2955 		case ETHERTYPE_IP:
2956 #ifdef INET6
2957 		case ETHERTYPE_IPV6:
2958 #endif /* INET6 */
2959 			break;
2960 		default:
2961 			/*
2962 			 * Check to see if the user wants to pass non-IP
2963 			 * packets; these are not checked by pfil(9) and would
2964 			 * pass unconditionally, so the default is to drop them.
2965 			 */
2966 			if (pfil_onlyip)
2967 				goto bad;
2968 	}
2969 
2970 	/* Strip off the Ethernet header and keep a copy. */
2971 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
2972 	m_adj(*mp, ETHER_HDR_LEN);
2973 
2974 	/* Strip off snap header, if present */
2975 	if (snap) {
2976 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
2977 		m_adj(*mp, sizeof(struct llc));
2978 	}
2979 
2980 	/*
2981 	 * Check the IP header for alignment and errors
2982 	 */
2983 	if (dir == PFIL_IN) {
2984 		switch (ether_type) {
2985 			case ETHERTYPE_IP:
2986 				error = bridge_ip_checkbasic(mp);
2987 				break;
2988 #ifdef INET6
2989 			case ETHERTYPE_IPV6:
2990 				error = bridge_ip6_checkbasic(mp);
2991 				break;
2992 #endif /* INET6 */
2993 			default:
2994 				error = 0;
2995 		}
2996 		if (error)
2997 			goto bad;
2998 	}
2999 
3000 	if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
3001 		error = -1;
3002 		args.rule = ip_dn_claim_rule(*mp);
3003 		if (args.rule != NULL && fw_one_pass)
3004 			goto ipfwpass; /* packet already partially processed */
3005 
3006 		args.m = *mp;
3007 		args.oif = ifp;
3008 		args.next_hop = NULL;
3009 		args.eh = &eh2;
3010 		args.inp = NULL;	/* used by ipfw uid/gid/jail rules */
3011 		i = ip_fw_chk_ptr(&args);
3012 		*mp = args.m;
3013 
3014 		if (*mp == NULL)
3015 			return (error);
3016 
3017 		if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
3018 
3019 			/* put the Ethernet header back on */
3020 			M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
3021 			if (*mp == NULL)
3022 				return (error);
3023 			bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3024 
3025 			/*
3026 			 * Pass the pkt to dummynet, which consumes it. The
3027 			 * packet will return to us via bridge_dummynet().
3028 			 */
3029 			args.oif = ifp;
3030 			ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args);
3031 			return (error);
3032 		}
3033 
3034 		if (i != IP_FW_PASS) /* drop */
3035 			goto bad;
3036 	}
3037 
3038 ipfwpass:
3039 	error = 0;
3040 
3041 	/*
3042 	 * Run the packet through pfil
3043 	 */
3044 	switch (ether_type) {
3045 	case ETHERTYPE_IP:
3046 		/*
3047 		 * Before calling the firewall, swap fields the same as
3048 		 * IP does.  Here we assume the header is contiguous.
3049 		 */
3050 		ip = mtod(*mp, struct ip *);
3051 
3052 		ip->ip_len = ntohs(ip->ip_len);
3053 		ip->ip_off = ntohs(ip->ip_off);
3054 
3055 		/*
3056 		 * Run pfil on the member interface and the bridge, both can
3057 		 * be skipped by clearing pfil_member or pfil_bridge.
3058 		 *
3059 		 * Keep the order:
3060 		 *   in_if -> bridge_if -> out_if
3061 		 */
3062 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3063 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
3064 					dir, NULL);
3065 
3066 		if (*mp == NULL || error != 0) /* filter may consume */
3067 			break;
3068 
3069 		if (pfil_member && ifp != NULL)
3070 			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
3071 					dir, NULL);
3072 
3073 		if (*mp == NULL || error != 0) /* filter may consume */
3074 			break;
3075 
3076 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3077 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
3078 					dir, NULL);
3079 
3080 		if (*mp == NULL || error != 0) /* filter may consume */
3081 			break;
3082 
3083 		/* check if we need to fragment the packet */
3084 		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3085 			i = (*mp)->m_pkthdr.len;
3086 			if (i > ifp->if_mtu) {
3087 				error = bridge_fragment(ifp, *mp, &eh2, snap,
3088 					    &llc1);
3089 				return (error);
3090 			}
3091 		}
3092 
3093 		/* Recalculate the ip checksum and restore byte ordering */
3094 		ip = mtod(*mp, struct ip *);
3095 		hlen = ip->ip_hl << 2;
3096 		if (hlen < sizeof(struct ip))
3097 			goto bad;
3098 		if (hlen > (*mp)->m_len) {
3099 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3100 				goto bad;
3101 			ip = mtod(*mp, struct ip *);
3102 			if (ip == NULL)
3103 				goto bad;
3104 		}
3105 		ip->ip_len = htons(ip->ip_len);
3106 		ip->ip_off = htons(ip->ip_off);
3107 		ip->ip_sum = 0;
3108 		if (hlen == sizeof(struct ip))
3109 			ip->ip_sum = in_cksum_hdr(ip);
3110 		else
3111 			ip->ip_sum = in_cksum(*mp, hlen);
3112 
3113 		break;
3114 #ifdef INET6
3115 	case ETHERTYPE_IPV6:
3116 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3117 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3118 					dir, NULL);
3119 
3120 		if (*mp == NULL || error != 0) /* filter may consume */
3121 			break;
3122 
3123 		if (pfil_member && ifp != NULL)
3124 			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3125 					dir, NULL);
3126 
3127 		if (*mp == NULL || error != 0) /* filter may consume */
3128 			break;
3129 
3130 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3131 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3132 					dir, NULL);
3133 		break;
3134 #endif
3135 	default:
3136 		error = 0;
3137 		break;
3138 	}
3139 
3140 	if (*mp == NULL)
3141 		return (error);
3142 	if (error != 0)
3143 		goto bad;
3144 
3145 	error = -1;
3146 
3147 	/*
3148 	 * Finally, put everything back the way it was and return
3149 	 */
3150 	if (snap) {
3151 		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
3152 		if (*mp == NULL)
3153 			return (error);
3154 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3155 	}
3156 
3157 	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
3158 	if (*mp == NULL)
3159 		return (error);
3160 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3161 
3162 	return (0);
3163 
3164 bad:
3165 	m_freem(*mp);
3166 	*mp = NULL;
3167 	return (error);
3168 }
3169 
3170 /*
3171  * Perform basic checks on header size since
3172  * pfil assumes ip_input has already processed
3173  * the packet for it.  Cut-and-pasted from ip_input.c.
3174  * Given how simple the IPv6 version is,
3175  * does the IPv4 version really need to be
3176  * this complicated?
3177  *
3178  * XXX Should we update ipstat here, or not?
3179  * XXX Right now we update ipstat but not
3180  * XXX csum_counter.
3181  */
3182 static int
3183 bridge_ip_checkbasic(struct mbuf **mp)
3184 {
3185 	struct mbuf *m = *mp;
3186 	struct ip *ip;
3187 	int len, hlen;
3188 	u_short sum;
3189 
3190 	if (*mp == NULL)
3191 		return (-1);
3192 
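	/*
	 * If the IP header is not aligned, copy it up into a new mbuf with
	 * room for the link headers; otherwise just make sure the whole
	 * base header is in the first mbuf of the chain.
	 */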
3193 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3194 		if ((m = m_copyup(m, sizeof(struct ip),
3195 			(max_linkhdr + 3) & ~3)) == NULL) {
3196 			/* XXXJRT new stat, please */
3197 			ipstat.ips_toosmall++;
3198 			goto bad;
3199 		}
3200 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3201 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3202 			ipstat.ips_toosmall++;
3203 			goto bad;
3204 		}
3205 	}
3206 	ip = mtod(m, struct ip *);
3207 	if (ip == NULL) goto bad;
3208 
3209 	if (ip->ip_v != IPVERSION) {
3210 		ipstat.ips_badvers++;
3211 		goto bad;
3212 	}
3213 	hlen = ip->ip_hl << 2;
3214 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3215 		ipstat.ips_badhlen++;
3216 		goto bad;
3217 	}
3218 	if (hlen > m->m_len) {
3219 		if ((m = m_pullup(m, hlen)) == NULL) {
3220 			ipstat.ips_badhlen++;
3221 			goto bad;
3222 		}
3223 		ip = mtod(m, struct ip *);
3224 		if (ip == NULL) goto bad;
3225 	}
3226 
3227 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3228 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3229 	} else {
3230 		if (hlen == sizeof(struct ip)) {
3231 			sum = in_cksum_hdr(ip);
3232 		} else {
3233 			sum = in_cksum(m, hlen);
3234 		}
3235 	}
3236 	if (sum) {
3237 		ipstat.ips_badsum++;
3238 		goto bad;
3239 	}
3240 
3241 	/* Retrieve the packet length. */
3242 	len = ntohs(ip->ip_len);
3243 
3244 	/*
3245 	 * Check for additional length bogosity
3246 	 */
3247 	if (len < hlen) {
3248 		ipstat.ips_badlen++;
3249 		goto bad;
3250 	}
3251 
3252 	/*
3253 	 * Check that the amount of data in the buffers
3254 	 * is at least as much as the IP header would have us expect.
3255 	 * Drop packet if shorter than we expect.
3256 	 */
3257 	if (m->m_pkthdr.len < len) {
3258 		ipstat.ips_tooshort++;
3259 		goto bad;
3260 	}
3261 
3262 	/* Checks out, proceed */
3263 	*mp = m;
3264 	return (0);
3265 
3266 bad:
3267 	*mp = m;
3268 	return (-1);
3269 }
3270 
3271 #ifdef INET6
3272 /*
3273  * Same as above, but for IPv6.
3274  * Cut-and-pasted from ip6_input.c.
3275  * XXX Should we update ip6stat, or not?
3276  */
3277 static int
3278 bridge_ip6_checkbasic(struct mbuf **mp)
3279 {
3280 	struct mbuf *m = *mp;
3281 	struct ip6_hdr *ip6;
3282 
3283 	/*
3284 	 * If the IPv6 header is not aligned, slurp it up into a new
3285 	 * mbuf with space for link headers, in the event we forward
3286 	 * it.  Otherwise, if it is aligned, make sure the entire base
3287 	 * IPv6 header is in the first mbuf of the chain.
3288 	 */
3289 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3290 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3291 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3292 			    (max_linkhdr + 3) & ~3)) == NULL) {
3293 			/* XXXJRT new stat, please */
3294 			ip6stat.ip6s_toosmall++;
3295 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3296 			goto bad;
3297 		}
3298 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3299 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3300 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3301 			ip6stat.ip6s_toosmall++;
3302 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3303 			goto bad;
3304 		}
3305 	}
3306 
3307 	ip6 = mtod(m, struct ip6_hdr *);
3308 
3309 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3310 		ip6stat.ip6s_badvers++;
3311 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3312 		goto bad;
3313 	}
3314 
3315 	/* Checks out, proceed */
3316 	*mp = m;
3317 	return (0);
3318 
3319 bad:
3320 	*mp = m;
3321 	return (-1);
3322 }
3323 #endif /* INET6 */
3324 
3325 /*
3326  * bridge_fragment:
3327  *
3328  *	Return a fragmented mbuf chain.
3329  */
3330 static int
3331 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3332     int snap, struct llc *llc)
3333 {
3334 	struct mbuf *m0;
3335 	struct ip *ip;
3336 	int error = -1;
3337 
3338 	if (m->m_len < sizeof(struct ip) &&
3339 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3340 		goto out;
3341 	ip = mtod(m, struct ip *);
3342 
3343 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3344 		    CSUM_DELAY_IP);
3345 	if (error)
3346 		goto out;
3347 
3348 	/* walk the chain and re-add the Ethernet header */
3349 	for (m0 = m; m0; m0 = m0->m_nextpkt) {
3350 		if (error == 0) {
3351 			if (snap) {
3352 				M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT);
3353 				if (m0 == NULL) {
3354 					error = ENOBUFS;
3355 					continue;
3356 				}
3357 				bcopy(llc, mtod(m0, caddr_t),
3358 				    sizeof(struct llc));
3359 			}
3360 			M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT);
3361 			if (m0 == NULL) {
3362 				error = ENOBUFS;
3363 				continue;
3364 			}
3365 			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3366 		} else
3367 			m_freem(m);
3368 	}
3369 
3370 	if (error == 0)
3371 		ipstat.ips_fragmented++;
3372 
3373 	return (error);
3374 
3375 out:
3376 	if (m != NULL)
3377 		m_freem(m);
3378 	return (error);
3379 }
3380