xref: /freebsd/sys/net/if_bridge.c (revision 9f0c02d4255b2036f652c924d3df4fa88c7c721a)
1 /*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Jason L. Wright
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66  * POSSIBILITY OF SUCH DAMAGE.
67  *
68  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69  */
70 
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *	- Currently only supports Ethernet-like interfaces (Ethernet,
77  *	  802.11, VLANs on Ethernet, etc.).  Figure out a nice way
78  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogeneous bridges).
80  */
81 
82 #include <sys/cdefs.h>
83 __FBSDID("$FreeBSD$");
84 
85 #include "opt_inet.h"
86 #include "opt_inet6.h"
87 #include "opt_carp.h"
88 
89 #include <sys/param.h>
90 #include <sys/mbuf.h>
91 #include <sys/malloc.h>
92 #include <sys/protosw.h>
93 #include <sys/systm.h>
94 #include <sys/time.h>
95 #include <sys/socket.h> /* for net/if.h */
96 #include <sys/sockio.h>
97 #include <sys/ctype.h>  /* string functions */
98 #include <sys/kernel.h>
99 #include <sys/random.h>
100 #include <sys/syslog.h>
101 #include <sys/sysctl.h>
102 #include <vm/uma.h>
103 #include <sys/module.h>
104 #include <sys/priv.h>
105 #include <sys/proc.h>
106 #include <sys/lock.h>
107 #include <sys/mutex.h>
108 
109 #include <net/bpf.h>
110 #include <net/if.h>
111 #include <net/if_clone.h>
112 #include <net/if_dl.h>
113 #include <net/if_types.h>
114 #include <net/if_var.h>
115 #include <net/pfil.h>
116 
117 #include <netinet/in.h> /* for struct arpcom */
118 #include <netinet/in_systm.h>
119 #include <netinet/in_var.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #ifdef INET6
123 #include <netinet/ip6.h>
124 #include <netinet6/ip6_var.h>
125 #endif
126 #ifdef DEV_CARP
127 #include <netinet/ip_carp.h>
128 #endif
129 #include <machine/in_cksum.h>
130 #include <netinet/if_ether.h> /* for struct arpcom */
131 #include <net/bridgestp.h>
132 #include <net/if_bridgevar.h>
133 #include <net/if_llc.h>
134 #include <net/if_vlan_var.h>
135 
136 #include <net/route.h>
137 #include <netinet/ip_fw.h>
138 #include <netinet/ip_dummynet.h>
139 
140 /*
141  * Size of the route hash table.  Must be a power of two.
142  */
143 #ifndef BRIDGE_RTHASH_SIZE
144 #define	BRIDGE_RTHASH_SIZE		1024
145 #endif
146 
147 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
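/*
 * The mask above reduces a hash value to a bucket index, which is why the
 * table size must be a power of two.
 */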
148 
149 /*
150  * Maximum number of addresses to cache.
151  */
152 #ifndef BRIDGE_RTABLE_MAX
153 #define	BRIDGE_RTABLE_MAX		100
154 #endif
155 
156 /*
157  * Timeout (in seconds) for entries learned dynamically.
158  */
159 #ifndef BRIDGE_RTABLE_TIMEOUT
160 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
161 #endif
162 
163 /*
164  * Number of seconds between walks of the route list.
165  */
166 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
167 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
168 #endif
169 
170 /*
171  * List of capabilities to mask on the member interface.
172  */
173 #define	BRIDGE_IFCAPS_MASK		IFCAP_TXCSUM
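/*
 * TX checksum offload is masked off because a frame received on one member
 * may be forwarded out another interface that cannot complete the checksum.
 */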
174 
175 /*
176  * Bridge interface list entry.
177  */
178 struct bridge_iflist {
179 	LIST_ENTRY(bridge_iflist) bif_next;
180 	struct ifnet		*bif_ifp;	/* member if */
181 	struct bstp_port	bif_stp;	/* STP state */
182 	uint32_t		bif_flags;	/* member if flags */
183 	int			bif_mutecap;	/* member muted caps */
184 	uint32_t		bif_addrmax;	/* max # of addresses */
185 	uint32_t		bif_addrcnt;	/* cur. # of addresses */
186 	uint32_t		bif_addrexceeded;/* # of address violations */
187 };
188 
189 /*
190  * Bridge route node.
191  */
192 struct bridge_rtnode {
193 	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
194 	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
195 	struct bridge_iflist	*brt_dst;	/* destination if */
196 	unsigned long		brt_expire;	/* expiration time */
197 	uint8_t			brt_flags;	/* address flags */
198 	uint8_t			brt_addr[ETHER_ADDR_LEN];
199 	uint16_t		brt_vlan;	/* vlan id */
200 };
201 #define	brt_ifp			brt_dst->bif_ifp
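/* Shorthand for the ifnet of the member the address was learned on. */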
202 
203 /*
204  * Software state for each bridge.
205  */
206 struct bridge_softc {
207 	struct ifnet		*sc_ifp;	/* make this an interface */
208 	LIST_ENTRY(bridge_softc) sc_list;
209 	struct mtx		sc_mtx;
210 	struct cv		sc_cv;
211 	uint32_t		sc_brtmax;	/* max # of addresses */
212 	uint32_t		sc_brtcnt;	/* cur. # of addresses */
213 	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
214 	struct callout		sc_brcallout;	/* bridge callout */
215 	uint32_t		sc_iflist_ref;	/* refcount for sc_iflist */
216 	uint32_t		sc_iflist_xcnt;	/* refcount for sc_iflist */
217 	LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
218 	LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
219 	LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
220 	uint32_t		sc_rthash_key;	/* key for hash */
221 	LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
222 	struct bstp_state	sc_stp;		/* STP state */
223 	uint32_t		sc_brtexceeded;	/* # of cache drops */
224 };
225 
226 static struct mtx 	bridge_list_mtx;
227 eventhandler_tag	bridge_detach_cookie = NULL;
228 
229 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
230 
231 uma_zone_t bridge_rtnode_zone;
232 
233 static int	bridge_clone_create(struct if_clone *, int, caddr_t);
234 static void	bridge_clone_destroy(struct ifnet *);
235 
236 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
237 static void	bridge_mutecaps(struct bridge_iflist *, int);
238 static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
239 static void	bridge_init(void *);
240 static void	bridge_dummynet(struct mbuf *, struct ifnet *);
241 static void	bridge_stop(struct ifnet *, int);
242 static void	bridge_start(struct ifnet *);
243 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
244 static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
245 		    struct rtentry *);
246 static void	bridge_enqueue(struct bridge_softc *, struct ifnet *,
247 		    struct mbuf *);
248 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
249 
250 static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
251 		    struct mbuf *m);
252 
253 static void	bridge_timer(void *);
254 
255 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
256 		    struct mbuf *, int);
257 static void	bridge_span(struct bridge_softc *, struct mbuf *);
258 
259 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
260 		    uint16_t, struct bridge_iflist *, int, uint8_t);
261 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
262 		    uint16_t);
263 static void	bridge_rttrim(struct bridge_softc *);
264 static void	bridge_rtage(struct bridge_softc *);
265 static void	bridge_rtflush(struct bridge_softc *, int);
266 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
267 		    uint16_t);
268 
269 static int	bridge_rtable_init(struct bridge_softc *);
270 static void	bridge_rtable_fini(struct bridge_softc *);
271 
272 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
273 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
274 		    const uint8_t *, uint16_t);
275 static int	bridge_rtnode_insert(struct bridge_softc *,
276 		    struct bridge_rtnode *);
277 static void	bridge_rtnode_destroy(struct bridge_softc *,
278 		    struct bridge_rtnode *);
279 static void	bridge_rtable_expire(struct ifnet *, int);
280 static void	bridge_state_change(struct ifnet *, int);
281 
282 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
283 		    const char *name);
284 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
285 		    struct ifnet *ifp);
286 static void	bridge_delete_member(struct bridge_softc *,
287 		    struct bridge_iflist *, int);
288 static void	bridge_delete_span(struct bridge_softc *,
289 		    struct bridge_iflist *);
290 
291 static int	bridge_ioctl_add(struct bridge_softc *, void *);
292 static int	bridge_ioctl_del(struct bridge_softc *, void *);
293 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
294 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
295 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
296 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
297 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
298 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
299 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
300 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
301 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
302 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
303 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
304 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
305 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
306 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
307 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
308 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
309 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
310 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
311 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
312 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
313 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
314 static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
315 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
316 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
317 static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
318 static int	bridge_ioctl_grte(struct bridge_softc *, void *);
319 static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
320 static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
321 static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
322 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
323 		    int);
324 static int	bridge_ip_checkbasic(struct mbuf **mp);
325 #ifdef INET6
326 static int	bridge_ip6_checkbasic(struct mbuf **mp);
327 #endif /* INET6 */
328 static int	bridge_fragment(struct ifnet *, struct mbuf *,
329 		    struct ether_header *, int, struct llc *);
330 
331 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
332 #define	VLANTAGOF(_m)	\
333     (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1
334 
335 static struct bstp_cb_ops bridge_ops = {
336 	.bcb_state = bridge_state_change,
337 	.bcb_rtage = bridge_rtable_expire
338 };
339 
340 SYSCTL_DECL(_net_link);
341 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
342 
343 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
344 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
345 static int pfil_member = 1; /* run pfil hooks on the member interface */
346 static int pfil_ipfw = 0;   /* layer2 filter with ipfw */
347 static int pfil_ipfw_arp = 0;   /* layer2 ARP filter with ipfw */
348 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for
349                                    locally destined packets */
350 static int log_stp   = 0;   /* log STP state changes */
351 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
352     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
353 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW,
354     &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
355 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
356     &pfil_bridge, 0, "Packet filter on the bridge interface");
357 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
358     &pfil_member, 0, "Packet filter on the member interface");
359 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW,
360     &pfil_local_phys, 0,
361     "Packet filter on the physical interface for locally destined packets");
362 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
363     &log_stp, 0, "Log STP state changes");
364 
365 struct bridge_control {
366 	int	(*bc_func)(struct bridge_softc *, void *);
367 	int	bc_argsize;
368 	int	bc_flags;
369 };
370 
371 #define	BC_F_COPYIN		0x01	/* copy arguments in */
372 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
373 #define	BC_F_SUSER		0x04	/* do super-user check */
374 
375 const struct bridge_control bridge_control_table[] = {
376 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
377 	  BC_F_COPYIN|BC_F_SUSER },
378 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
379 	  BC_F_COPYIN|BC_F_SUSER },
380 
381 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
382 	  BC_F_COPYIN|BC_F_COPYOUT },
383 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
384 	  BC_F_COPYIN|BC_F_SUSER },
385 
386 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
387 	  BC_F_COPYIN|BC_F_SUSER },
388 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
389 	  BC_F_COPYOUT },
390 
391 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
392 	  BC_F_COPYIN|BC_F_COPYOUT },
393 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
394 	  BC_F_COPYIN|BC_F_COPYOUT },
395 
396 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
397 	  BC_F_COPYIN|BC_F_SUSER },
398 
399 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
400 	  BC_F_COPYIN|BC_F_SUSER },
401 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
402 	  BC_F_COPYOUT },
403 
404 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
405 	  BC_F_COPYIN|BC_F_SUSER },
406 
407 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
408 	  BC_F_COPYIN|BC_F_SUSER },
409 
410 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
411 	  BC_F_COPYOUT },
412 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
413 	  BC_F_COPYIN|BC_F_SUSER },
414 
415 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
416 	  BC_F_COPYOUT },
417 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
418 	  BC_F_COPYIN|BC_F_SUSER },
419 
420 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
421 	  BC_F_COPYOUT },
422 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
423 	  BC_F_COPYIN|BC_F_SUSER },
424 
425 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
426 	  BC_F_COPYOUT },
427 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
428 	  BC_F_COPYIN|BC_F_SUSER },
429 
430 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
431 	  BC_F_COPYIN|BC_F_SUSER },
432 
433 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
434 	  BC_F_COPYIN|BC_F_SUSER },
435 
436 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
437 	  BC_F_COPYIN|BC_F_SUSER },
438 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
439 	  BC_F_COPYIN|BC_F_SUSER },
440 
441 	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
442 	  BC_F_COPYOUT },
443 
444 	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
445 	  BC_F_COPYOUT },
446 
447 	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
448 	  BC_F_COPYIN|BC_F_COPYOUT },
449 
450 	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
451 	  BC_F_COPYIN|BC_F_SUSER },
452 
453 	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
454 	  BC_F_COPYIN|BC_F_SUSER },
455 
456 	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
457 	  BC_F_COPYIN|BC_F_SUSER },
458 
459 };
460 const int bridge_control_table_size =
461     sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
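/*
 * Userland reaches the handlers above through SIOCSDRVSPEC/SIOCGDRVSPEC,
 * with ifd_cmd used as an index into bridge_control_table.  An illustrative
 * sketch (the BRDG* command values live in net/if_bridgevar.h; "bridge0" and
 * "em0" are example interface names, s is any socket descriptor):
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 */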
462 
463 LIST_HEAD(, bridge_softc) bridge_list;
464 
465 IFC_SIMPLE_DECLARE(bridge, 0);
466 
467 static int
468 bridge_modevent(module_t mod, int type, void *data)
469 {
470 
471 	switch (type) {
472 	case MOD_LOAD:
473 		mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF);
474 		if_clone_attach(&bridge_cloner);
475 		bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
476 		    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
477 		    UMA_ALIGN_PTR, 0);
478 		LIST_INIT(&bridge_list);
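		/*
		 * Publish the bridge entry points; the Ethernet input/output
		 * paths and dummynet call through these pointers for
		 * interfaces that have joined a bridge.
		 */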
479 		bridge_input_p = bridge_input;
480 		bridge_output_p = bridge_output;
481 		bridge_dn_p = bridge_dummynet;
482 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
483 		    ifnet_departure_event, bridge_ifdetach, NULL,
484 		    EVENTHANDLER_PRI_ANY);
485 		break;
486 	case MOD_UNLOAD:
487 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
488 		    bridge_detach_cookie);
489 		if_clone_detach(&bridge_cloner);
490 		uma_zdestroy(bridge_rtnode_zone);
491 		bridge_input_p = NULL;
492 		bridge_output_p = NULL;
493 		bridge_dn_p = NULL;
494 		mtx_destroy(&bridge_list_mtx);
495 		break;
496 	default:
497 		return (EOPNOTSUPP);
498 	}
499 	return (0);
500 }
501 
502 static moduledata_t bridge_mod = {
503 	"if_bridge",
504 	bridge_modevent,
505 	0
506 };
507 
508 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
509 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
510 
511 /*
512  * handler for net.link.bridge.pfil_ipfw
513  */
514 static int
515 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
516 {
517 	int enable = pfil_ipfw;
518 	int error;
519 
520 	error = sysctl_handle_int(oidp, &enable, 0, req);
521 	enable = (enable) ? 1 : 0;
522 
523 	if (enable != pfil_ipfw) {
524 		pfil_ipfw = enable;
525 
526 		/*
527 		 * Disable pfil so that ipfw doesn't run twice; if the user
528 		 * really wants both, they can re-enable pfil_bridge and/or
529 		 * pfil_member.  Also allow non-IP packets, as ipfw can filter
530 		 * by layer2 type.
531 		 */
532 		if (pfil_ipfw) {
533 			pfil_onlyip = 0;
534 			pfil_bridge = 0;
535 			pfil_member = 0;
536 		}
537 	}
538 
539 	return (error);
540 }
541 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
542 	    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
543 
544 /*
545  * bridge_clone_create:
546  *
547  *	Create a new bridge instance.
548  */
549 static int
550 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
551 {
552 	struct bridge_softc *sc, *sc2;
553 	struct ifnet *bifp, *ifp;
554 	u_char eaddr[6];
555 	int retry;
556 
557 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
558 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
559 	if (ifp == NULL) {
560 		free(sc, M_DEVBUF);
561 		return (ENOSPC);
562 	}
563 
564 	BRIDGE_LOCK_INIT(sc);
565 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
566 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
567 
568 	/* Initialize our routing table. */
569 	bridge_rtable_init(sc);
570 
571 	callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);
572 
573 	LIST_INIT(&sc->sc_iflist);
574 	LIST_INIT(&sc->sc_spanlist);
575 
576 	ifp->if_softc = sc;
577 	if_initname(ifp, ifc->ifc_name, unit);
578 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
579 	ifp->if_ioctl = bridge_ioctl;
580 	ifp->if_start = bridge_start;
581 	ifp->if_init = bridge_init;
582 	ifp->if_type = IFT_BRIDGE;
583 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
584 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
585 	IFQ_SET_READY(&ifp->if_snd);
586 
587 	/*
588 	 * Generate a random ethernet address with a locally administered
589 	 * address.
590 	 *
591 	 * Since we are using random ethernet addresses for the bridge, it is
592 	 * possible that we might have address collisions, so make sure that
593 	 * this hardware address isn't already in use on another bridge.
594 	 */
595 	for (retry = 1; retry != 0;) {
596 		arc4rand(eaddr, ETHER_ADDR_LEN, 1);
597 		eaddr[0] &= ~1;		/* clear multicast bit */
598 		eaddr[0] |= 2;		/* set the LAA bit */
599 		retry = 0;
600 		mtx_lock(&bridge_list_mtx);
601 		LIST_FOREACH(sc2, &bridge_list, sc_list) {
602 			bifp = sc2->sc_ifp;
603 			if (memcmp(eaddr, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0)
604 				retry = 1;
605 		}
606 		mtx_unlock(&bridge_list_mtx);
607 	}
608 
609 	bstp_attach(&sc->sc_stp, &bridge_ops);
610 	ether_ifattach(ifp, eaddr);
611 	/* Now undo some of the damage... */
612 	ifp->if_baudrate = 0;
613 	ifp->if_type = IFT_BRIDGE;
614 
615 	mtx_lock(&bridge_list_mtx);
616 	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
617 	mtx_unlock(&bridge_list_mtx);
618 
619 	return (0);
620 }
621 
622 /*
623  * bridge_clone_destroy:
624  *
625  *	Destroy a bridge instance.
626  */
627 static void
628 bridge_clone_destroy(struct ifnet *ifp)
629 {
630 	struct bridge_softc *sc = ifp->if_softc;
631 	struct bridge_iflist *bif;
632 
633 	BRIDGE_LOCK(sc);
634 
635 	bridge_stop(ifp, 1);
636 	ifp->if_flags &= ~IFF_UP;
637 
638 	while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
639 		bridge_delete_member(sc, bif, 0);
640 
641 	while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) {
642 		bridge_delete_span(sc, bif);
643 	}
644 
645 	BRIDGE_UNLOCK(sc);
646 
647 	callout_drain(&sc->sc_brcallout);
648 
649 	mtx_lock(&bridge_list_mtx);
650 	LIST_REMOVE(sc, sc_list);
651 	mtx_unlock(&bridge_list_mtx);
652 
653 	bstp_detach(&sc->sc_stp);
654 	ether_ifdetach(ifp);
655 	if_free_type(ifp, IFT_ETHER);
656 
657 	/* Tear down the routing table. */
658 	bridge_rtable_fini(sc);
659 
660 	BRIDGE_LOCK_DESTROY(sc);
661 	free(sc, M_DEVBUF);
662 }
663 
664 /*
665  * bridge_ioctl:
666  *
667  *	Handle a control request from the operator.
668  */
669 static int
670 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
671 {
672 	struct bridge_softc *sc = ifp->if_softc;
673 	struct thread *td = curthread;
674 	union {
675 		struct ifbreq ifbreq;
676 		struct ifbifconf ifbifconf;
677 		struct ifbareq ifbareq;
678 		struct ifbaconf ifbaconf;
679 		struct ifbrparam ifbrparam;
680 		struct ifbropreq ifbropreq;
681 	} args;
682 	struct ifdrv *ifd = (struct ifdrv *) data;
683 	const struct bridge_control *bc;
684 	int error = 0;
685 
686 	switch (cmd) {
687 
688 	case SIOCADDMULTI:
689 	case SIOCDELMULTI:
690 		break;
691 
692 	case SIOCGDRVSPEC:
693 	case SIOCSDRVSPEC:
694 		if (ifd->ifd_cmd >= bridge_control_table_size) {
695 			error = EINVAL;
696 			break;
697 		}
698 		bc = &bridge_control_table[ifd->ifd_cmd];
699 
700 		if (cmd == SIOCGDRVSPEC &&
701 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
702 			error = EINVAL;
703 			break;
704 		}
705 		else if (cmd == SIOCSDRVSPEC &&
706 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
707 			error = EINVAL;
708 			break;
709 		}
710 
711 		if (bc->bc_flags & BC_F_SUSER) {
712 			error = priv_check(td, PRIV_NET_BRIDGE);
713 			if (error)
714 				break;
715 		}
716 
717 		if (ifd->ifd_len != bc->bc_argsize ||
718 		    ifd->ifd_len > sizeof(args)) {
719 			error = EINVAL;
720 			break;
721 		}
722 
723 		bzero(&args, sizeof(args));
724 		if (bc->bc_flags & BC_F_COPYIN) {
725 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
726 			if (error)
727 				break;
728 		}
729 
730 		BRIDGE_LOCK(sc);
731 		error = (*bc->bc_func)(sc, &args);
732 		BRIDGE_UNLOCK(sc);
733 		if (error)
734 			break;
735 
736 		if (bc->bc_flags & BC_F_COPYOUT)
737 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
738 
739 		break;
740 
741 	case SIOCSIFFLAGS:
742 		if (!(ifp->if_flags & IFF_UP) &&
743 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
744 			/*
745 			 * If interface is marked down and it is running,
746 			 * then stop and disable it.
747 			 */
748 			BRIDGE_LOCK(sc);
749 			bridge_stop(ifp, 1);
750 			BRIDGE_UNLOCK(sc);
751 		} else if ((ifp->if_flags & IFF_UP) &&
752 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
753 			/*
754 			 * If interface is marked up and it is stopped, then
755 			 * start it.
756 			 */
757 			(*ifp->if_init)(sc);
758 		}
759 		break;
760 
761 	case SIOCSIFMTU:
762 		/* Do not allow the MTU to be changed on the bridge */
763 		error = EINVAL;
764 		break;
765 
766 	default:
767 		/*
768 		 * drop the lock as ether_ioctl() will call bridge_start() and
769 		 * cause the lock to be recursed.
770 		 */
771 		error = ether_ioctl(ifp, cmd, data);
772 		break;
773 	}
774 
775 	return (error);
776 }
777 
778 /*
779  * bridge_mutecaps:
780  *
781  *	Clear or restore unwanted capabilities on the member interface.
782  */
783 static void
784 bridge_mutecaps(struct bridge_iflist *bif, int mute)
785 {
786 	struct ifnet *ifp = bif->bif_ifp;
787 	struct ifreq ifr;
788 	int error;
789 
790 	if (ifp->if_ioctl == NULL)
791 		return;
792 
793 	bzero(&ifr, sizeof(ifr));
794 	ifr.ifr_reqcap = ifp->if_capenable;
795 
796 	if (mute) {
797 		/* mask off and save capabilities */
798 		bif->bif_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
799 		if (bif->bif_mutecap != 0)
800 			ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
801 	} else
802 		/* restore muted capabilities */
803 		ifr.ifr_reqcap |= bif->bif_mutecap;
804 
805 
806 	if (bif->bif_mutecap != 0) {
807 		IFF_LOCKGIANT(ifp);
808 		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
809 		IFF_UNLOCKGIANT(ifp);
810 	}
811 }
812 
813 /*
814  * bridge_lookup_member:
815  *
816  *	Lookup a bridge member interface.
817  */
818 static struct bridge_iflist *
819 bridge_lookup_member(struct bridge_softc *sc, const char *name)
820 {
821 	struct bridge_iflist *bif;
822 	struct ifnet *ifp;
823 
824 	BRIDGE_LOCK_ASSERT(sc);
825 
826 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
827 		ifp = bif->bif_ifp;
828 		if (strcmp(ifp->if_xname, name) == 0)
829 			return (bif);
830 	}
831 
832 	return (NULL);
833 }
834 
835 /*
836  * bridge_lookup_member_if:
837  *
838  *	Lookup a bridge member interface by ifnet*.
839  */
840 static struct bridge_iflist *
841 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
842 {
843 	struct bridge_iflist *bif;
844 
845 	BRIDGE_LOCK_ASSERT(sc);
846 
847 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
848 		if (bif->bif_ifp == member_ifp)
849 			return (bif);
850 	}
851 
852 	return (NULL);
853 }
854 
855 /*
856  * bridge_delete_member:
857  *
858  *	Delete the specified member interface.
859  */
860 static void
861 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
862     int gone)
863 {
864 	struct ifnet *ifs = bif->bif_ifp;
865 
866 	BRIDGE_LOCK_ASSERT(sc);
867 
868 	if (!gone) {
869 		switch (ifs->if_type) {
870 		case IFT_ETHER:
871 		case IFT_L2VLAN:
872 			/*
873 			 * Take the interface out of promiscuous mode.
874 			 */
875 			(void) ifpromisc(ifs, 0);
876 			bridge_mutecaps(bif, 0);
877 			break;
878 
879 		case IFT_GIF:
880 			break;
881 
882 		default:
883 #ifdef DIAGNOSTIC
884 			panic("bridge_delete_member: impossible");
885 #endif
886 			break;
887 		}
888 	}
889 
890 	if (bif->bif_flags & IFBIF_STP)
891 		bstp_disable(&bif->bif_stp);
892 
893 	ifs->if_bridge = NULL;
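	/*
	 * BRIDGE_XLOCK waits for threads that hold only a list reference
	 * (BRIDGE_LOCK2REF) to drain before the member is unlinked.
	 */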
894 	BRIDGE_XLOCK(sc);
895 	LIST_REMOVE(bif, bif_next);
896 	BRIDGE_XDROP(sc);
897 
898 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
899 	KASSERT(bif->bif_addrcnt == 0,
900 	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
901 
902 	BRIDGE_UNLOCK(sc);
903 	bstp_destroy(&bif->bif_stp);	/* prepare to free */
904 	BRIDGE_LOCK(sc);
905 	free(bif, M_DEVBUF);
906 }
907 
908 /*
909  * bridge_delete_span:
910  *
911  *	Delete the specified span interface.
912  */
913 static void
914 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
915 {
916 	BRIDGE_LOCK_ASSERT(sc);
917 
918 	KASSERT(bif->bif_ifp->if_bridge == NULL,
919 	    ("%s: not a span interface", __func__));
920 
921 	LIST_REMOVE(bif, bif_next);
922 	free(bif, M_DEVBUF);
923 }
924 
925 static int
926 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
927 {
928 	struct ifbreq *req = arg;
929 	struct bridge_iflist *bif = NULL;
930 	struct ifnet *ifs;
931 	int error = 0;
932 
933 	ifs = ifunit(req->ifbr_ifsname);
934 	if (ifs == NULL)
935 		return (ENOENT);
936 
937 	/* If it's in the span list, it can't be a member. */
938 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
939 		if (ifs == bif->bif_ifp)
940 			return (EBUSY);
941 
942 	/* Allow the first Ethernet member to define the MTU */
943 	if (ifs->if_type != IFT_GIF) {
944 		if (LIST_EMPTY(&sc->sc_iflist))
945 			sc->sc_ifp->if_mtu = ifs->if_mtu;
946 		else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
947 			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
948 			    ifs->if_xname);
949 			return (EINVAL);
950 		}
951 	}
952 
953 	if (ifs->if_bridge == sc)
954 		return (EEXIST);
955 
956 	if (ifs->if_bridge != NULL)
957 		return (EBUSY);
958 
959 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
960 	if (bif == NULL)
961 		return (ENOMEM);
962 
963 	bif->bif_ifp = ifs;
964 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
965 
966 	switch (ifs->if_type) {
967 	case IFT_ETHER:
968 	case IFT_L2VLAN:
969 		/*
970 		 * Place the interface into promiscuous mode.
971 		 */
972 		error = ifpromisc(ifs, 1);
973 		if (error)
974 			goto out;
975 
976 		bridge_mutecaps(bif, 1);
977 		break;
978 
979 	case IFT_GIF:
980 		break;
981 
982 	default:
983 		error = EINVAL;
984 		goto out;
985 	}
986 
987 	ifs->if_bridge = sc;
988 	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
989 	/*
990 	 * XXX: XLOCK HERE!?!
991 	 *
992 	 * NOTE: insert_***HEAD*** should be safe for the traversals.
993 	 */
994 	LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
995 
996 out:
997 	if (error) {
998 		if (bif != NULL)
999 			free(bif, M_DEVBUF);
1000 	}
1001 	return (error);
1002 }
1003 
1004 static int
1005 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1006 {
1007 	struct ifbreq *req = arg;
1008 	struct bridge_iflist *bif;
1009 
1010 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1011 	if (bif == NULL)
1012 		return (ENOENT);
1013 
1014 	bridge_delete_member(sc, bif, 0);
1015 
1016 	return (0);
1017 }
1018 
1019 static int
1020 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1021 {
1022 	struct ifbreq *req = arg;
1023 	struct bridge_iflist *bif;
1024 	struct bstp_port *bp;
1025 
1026 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1027 	if (bif == NULL)
1028 		return (ENOENT);
1029 
1030 	bp = &bif->bif_stp;
1031 	req->ifbr_ifsflags = bif->bif_flags;
1032 	req->ifbr_state = bp->bp_state;
1033 	req->ifbr_priority = bp->bp_priority;
1034 	req->ifbr_path_cost = bp->bp_path_cost;
1035 	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1036 	req->ifbr_proto = bp->bp_protover;
1037 	req->ifbr_role = bp->bp_role;
1038 	req->ifbr_stpflags = bp->bp_flags;
1039 	req->ifbr_addrcnt = bif->bif_addrcnt;
1040 	req->ifbr_addrmax = bif->bif_addrmax;
1041 	req->ifbr_addrexceeded = bif->bif_addrexceeded;
1042 
1043 	/* Copy STP state options as flags */
1044 	if (bp->bp_operedge)
1045 		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1046 	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1047 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1048 	if (bp->bp_ptp_link)
1049 		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1050 	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1051 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1052 	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1053 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1054 	if (bp->bp_flags & BSTP_PORT_ADMCOST)
1055 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1056 	return (0);
1057 }
1058 
1059 static int
1060 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1061 {
1062 	struct ifbreq *req = arg;
1063 	struct bridge_iflist *bif;
1064 	struct bstp_port *bp;
1065 	int error;
1066 
1067 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1068 	if (bif == NULL)
1069 		return (ENOENT);
1070 	bp = &bif->bif_stp;
1071 
1072 	if (req->ifbr_ifsflags & IFBIF_SPAN)
1073 		/* SPAN is readonly */
1074 		return (EINVAL);
1075 
1076 	if (req->ifbr_ifsflags & IFBIF_STP) {
1077 		if ((bif->bif_flags & IFBIF_STP) == 0) {
1078 			error = bstp_enable(&bif->bif_stp);
1079 			if (error)
1080 				return (error);
1081 		}
1082 	} else {
1083 		if ((bif->bif_flags & IFBIF_STP) != 0)
1084 			bstp_disable(&bif->bif_stp);
1085 	}
1086 
1087 	/* Pass on STP flags */
1088 	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1089 	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1090 	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1091 	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1092 
1093 	/* Save the bits relating to the bridge */
1094 	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1095 
1096 	return (0);
1097 }
1098 
1099 static int
1100 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1101 {
1102 	struct ifbrparam *param = arg;
1103 
1104 	sc->sc_brtmax = param->ifbrp_csize;
1105 	bridge_rttrim(sc);
1106 
1107 	return (0);
1108 }
1109 
1110 static int
1111 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1112 {
1113 	struct ifbrparam *param = arg;
1114 
1115 	param->ifbrp_csize = sc->sc_brtmax;
1116 
1117 	return (0);
1118 }
1119 
1120 static int
1121 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1122 {
1123 	struct ifbifconf *bifc = arg;
1124 	struct bridge_iflist *bif;
1125 	struct ifbreq breq;
1126 	char *buf, *outbuf;
1127 	int count, buflen, len, error = 0;
1128 
1129 	count = 0;
1130 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1131 		count++;
1132 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1133 		count++;
1134 
1135 	buflen = sizeof(breq) * count;
1136 	if (bifc->ifbic_len == 0) {
1137 		bifc->ifbic_len = buflen;
1138 		return (0);
1139 	}
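	/*
	 * Drop the bridge lock across the sleeping allocation; the copy loops
	 * below are bounded by len, so a member list that changed meanwhile
	 * cannot overflow the buffer.
	 */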
1140 	BRIDGE_UNLOCK(sc);
1141 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1142 	BRIDGE_LOCK(sc);
1143 
1144 	count = 0;
1145 	buf = outbuf;
1146 	len = min(bifc->ifbic_len, buflen);
1147 	bzero(&breq, sizeof(breq));
1148 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1149 		if (len < sizeof(breq))
1150 			break;
1151 
1152 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1153 		    sizeof(breq.ifbr_ifsname));
1154 		/* Fill in the ifbreq structure */
1155 		error = bridge_ioctl_gifflags(sc, &breq);
1156 		if (error)
1157 			break;
1158 		memcpy(buf, &breq, sizeof(breq));
1159 		count++;
1160 		buf += sizeof(breq);
1161 		len -= sizeof(breq);
1162 	}
1163 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1164 		if (len < sizeof(breq))
1165 			break;
1166 
1167 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1168 		    sizeof(breq.ifbr_ifsname));
1169 		breq.ifbr_ifsflags = bif->bif_flags;
1170 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1171 		memcpy(buf, &breq, sizeof(breq));
1172 		count++;
1173 		buf += sizeof(breq);
1174 		len -= sizeof(breq);
1175 	}
1176 
1177 	BRIDGE_UNLOCK(sc);
1178 	bifc->ifbic_len = sizeof(breq) * count;
1179 	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1180 	BRIDGE_LOCK(sc);
1181 	free(outbuf, M_TEMP);
1182 	return (error);
1183 }
1184 
1185 static int
1186 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1187 {
1188 	struct ifbaconf *bac = arg;
1189 	struct bridge_rtnode *brt;
1190 	struct ifbareq bareq;
1191 	char *buf, *outbuf;
1192 	int count, buflen, len, error = 0;
1193 
1194 	if (bac->ifbac_len == 0)
1195 		return (0);
1196 
1197 	count = 0;
1198 	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1199 		count++;
1200 	buflen = sizeof(bareq) * count;
1201 
1202 	BRIDGE_UNLOCK(sc);
1203 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1204 	BRIDGE_LOCK(sc);
1205 
1206 	count = 0;
1207 	buf = outbuf;
1208 	len = min(bac->ifbac_len, buflen);
1209 	bzero(&bareq, sizeof(bareq));
1210 	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1211 		if (len < sizeof(bareq))
1212 			goto out;
1213 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1214 		    sizeof(bareq.ifba_ifsname));
1215 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1216 		bareq.ifba_vlan = brt->brt_vlan;
1217 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1218 				time_uptime < brt->brt_expire)
1219 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1220 		else
1221 			bareq.ifba_expire = 0;
1222 		bareq.ifba_flags = brt->brt_flags;
1223 
1224 		memcpy(buf, &bareq, sizeof(bareq));
1225 		count++;
1226 		buf += sizeof(bareq);
1227 		len -= sizeof(bareq);
1228 	}
1229 out:
1230 	BRIDGE_UNLOCK(sc);
1231 	bac->ifbac_len = sizeof(bareq) * count;
1232 	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1233 	BRIDGE_LOCK(sc);
1234 	free(outbuf, M_TEMP);
1235 	return (error);
1236 }
1237 
1238 static int
1239 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1240 {
1241 	struct ifbareq *req = arg;
1242 	struct bridge_iflist *bif;
1243 	int error;
1244 
1245 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1246 	if (bif == NULL)
1247 		return (ENOENT);
1248 
1249 	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1250 	    req->ifba_flags);
1251 
1252 	return (error);
1253 }
1254 
1255 static int
1256 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1257 {
1258 	struct ifbrparam *param = arg;
1259 
1260 	sc->sc_brttimeout = param->ifbrp_ctime;
1261 	return (0);
1262 }
1263 
1264 static int
1265 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1266 {
1267 	struct ifbrparam *param = arg;
1268 
1269 	param->ifbrp_ctime = sc->sc_brttimeout;
1270 	return (0);
1271 }
1272 
1273 static int
1274 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1275 {
1276 	struct ifbareq *req = arg;
1277 
1278 	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
1279 }
1280 
1281 static int
1282 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1283 {
1284 	struct ifbreq *req = arg;
1285 
1286 	bridge_rtflush(sc, req->ifbr_ifsflags);
1287 	return (0);
1288 }
1289 
1290 static int
1291 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1292 {
1293 	struct ifbrparam *param = arg;
1294 	struct bstp_state *bs = &sc->sc_stp;
1295 
1296 	param->ifbrp_prio = bs->bs_bridge_priority;
1297 	return (0);
1298 }
1299 
1300 static int
1301 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1302 {
1303 	struct ifbrparam *param = arg;
1304 
1305 	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1306 }
1307 
1308 static int
1309 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1310 {
1311 	struct ifbrparam *param = arg;
1312 	struct bstp_state *bs = &sc->sc_stp;
1313 
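	/* STP times are kept internally in 1/256ths of a second. */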
1314 	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1315 	return (0);
1316 }
1317 
1318 static int
1319 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1320 {
1321 	struct ifbrparam *param = arg;
1322 
1323 	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1324 }
1325 
1326 static int
1327 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1328 {
1329 	struct ifbrparam *param = arg;
1330 	struct bstp_state *bs = &sc->sc_stp;
1331 
1332 	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1333 	return (0);
1334 }
1335 
1336 static int
1337 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1338 {
1339 	struct ifbrparam *param = arg;
1340 
1341 	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1342 }
1343 
1344 static int
1345 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1346 {
1347 	struct ifbrparam *param = arg;
1348 	struct bstp_state *bs = &sc->sc_stp;
1349 
1350 	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1351 	return (0);
1352 }
1353 
1354 static int
1355 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1356 {
1357 	struct ifbrparam *param = arg;
1358 
1359 	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1360 }
1361 
1362 static int
1363 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1364 {
1365 	struct ifbreq *req = arg;
1366 	struct bridge_iflist *bif;
1367 
1368 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1369 	if (bif == NULL)
1370 		return (ENOENT);
1371 
1372 	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1373 }
1374 
1375 static int
1376 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1377 {
1378 	struct ifbreq *req = arg;
1379 	struct bridge_iflist *bif;
1380 
1381 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1382 	if (bif == NULL)
1383 		return (ENOENT);
1384 
1385 	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1386 }
1387 
1388 static int
1389 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1390 {
1391 	struct ifbreq *req = arg;
1392 	struct bridge_iflist *bif;
1393 
1394 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1395 	if (bif == NULL)
1396 		return (ENOENT);
1397 
1398 	bif->bif_addrmax = req->ifbr_addrmax;
1399 	return (0);
1400 }
1401 
1402 static int
1403 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1404 {
1405 	struct ifbreq *req = arg;
1406 	struct bridge_iflist *bif = NULL;
1407 	struct ifnet *ifs;
1408 
1409 	ifs = ifunit(req->ifbr_ifsname);
1410 	if (ifs == NULL)
1411 		return (ENOENT);
1412 
1413 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1414 		if (ifs == bif->bif_ifp)
1415 			return (EBUSY);
1416 
1417 	if (ifs->if_bridge != NULL)
1418 		return (EBUSY);
1419 
1420 	switch (ifs->if_type) {
1421 		case IFT_ETHER:
1422 		case IFT_GIF:
1423 		case IFT_L2VLAN:
1424 			break;
1425 		default:
1426 			return (EINVAL);
1427 	}
1428 
1429 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1430 	if (bif == NULL)
1431 		return (ENOMEM);
1432 
1433 	bif->bif_ifp = ifs;
1434 	bif->bif_flags = IFBIF_SPAN;
1435 
1436 	LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1437 
1438 	return (0);
1439 }
1440 
1441 static int
1442 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1443 {
1444 	struct ifbreq *req = arg;
1445 	struct bridge_iflist *bif;
1446 	struct ifnet *ifs;
1447 
1448 	ifs = ifunit(req->ifbr_ifsname);
1449 	if (ifs == NULL)
1450 		return (ENOENT);
1451 
1452 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1453 		if (ifs == bif->bif_ifp)
1454 			break;
1455 
1456 	if (bif == NULL)
1457 		return (ENOENT);
1458 
1459 	bridge_delete_span(sc, bif);
1460 
1461 	return (0);
1462 }
1463 
1464 static int
1465 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1466 {
1467 	struct ifbropreq *req = arg;
1468 	struct bstp_state *bs = &sc->sc_stp;
1469 	struct bstp_port *root_port;
1470 
1471 	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1472 	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1473 	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1474 
1475 	root_port = bs->bs_root_port;
1476 	if (root_port == NULL)
1477 		req->ifbop_root_port = 0;
1478 	else
1479 		req->ifbop_root_port = root_port->bp_ifp->if_index;
1480 
1481 	req->ifbop_holdcount = bs->bs_txholdcount;
1482 	req->ifbop_priority = bs->bs_bridge_priority;
1483 	req->ifbop_protocol = bs->bs_protover;
1484 	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1485 	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1486 	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1487 	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1488 	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1489 	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1490 
1491 	return (0);
1492 }
1493 
1494 static int
1495 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1496 {
1497 	struct ifbrparam *param = arg;
1498 
1499 	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1500 	return (0);
1501 }
1502 
1503 static int
1504 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1505 {
1506 	struct ifbpstpconf *bifstp = arg;
1507 	struct bridge_iflist *bif;
1508 	struct bstp_port *bp;
1509 	struct ifbpstpreq bpreq;
1510 	char *buf, *outbuf;
1511 	int count, buflen, len, error = 0;
1512 
1513 	count = 0;
1514 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1515 		if ((bif->bif_flags & IFBIF_STP) != 0)
1516 			count++;
1517 	}
1518 
1519 	buflen = sizeof(bpreq) * count;
1520 	if (bifstp->ifbpstp_len == 0) {
1521 		bifstp->ifbpstp_len = buflen;
1522 		return (0);
1523 	}
1524 
1525 	BRIDGE_UNLOCK(sc);
1526 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1527 	BRIDGE_LOCK(sc);
1528 
1529 	count = 0;
1530 	buf = outbuf;
1531 	len = min(bifstp->ifbpstp_len, buflen);
1532 	bzero(&bpreq, sizeof(bpreq));
1533 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1534 		if (len < sizeof(bpreq))
1535 			break;
1536 
1537 		if ((bif->bif_flags & IFBIF_STP) == 0)
1538 			continue;
1539 
1540 		bp = &bif->bif_stp;
1541 		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1542 		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1543 		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1544 		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1545 		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1546 		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1547 
1548 		memcpy(buf, &bpreq, sizeof(bpreq));
1549 		count++;
1550 		buf += sizeof(bpreq);
1551 		len -= sizeof(bpreq);
1552 	}
1553 
1554 	BRIDGE_UNLOCK(sc);
1555 	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1556 	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1557 	BRIDGE_LOCK(sc);
1558 	free(outbuf, M_TEMP);
1559 	return (error);
1560 }
1561 
1562 static int
1563 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
1564 {
1565 	struct ifbrparam *param = arg;
1566 
1567 	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
1568 }
1569 
1570 static int
1571 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
1572 {
1573 	struct ifbrparam *param = arg;
1574 
1575 	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
1576 }
1577 
1578 /*
1579  * bridge_ifdetach:
1580  *
1581  *	Detach an interface from a bridge.  Called when a member
1582  *	interface is detaching.
1583  */
1584 static void
1585 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1586 {
1587 	struct bridge_softc *sc = ifp->if_bridge;
1588 	struct bridge_iflist *bif;
1589 
1590 	/* Check if the interface is a bridge member */
1591 	if (sc != NULL) {
1592 		BRIDGE_LOCK(sc);
1593 
1594 		bif = bridge_lookup_member_if(sc, ifp);
1595 		if (bif != NULL)
1596 			bridge_delete_member(sc, bif, 1);
1597 
1598 		BRIDGE_UNLOCK(sc);
1599 		return;
1600 	}
1601 
1602 	/* Check if the interface is a span port */
1603 	mtx_lock(&bridge_list_mtx);
1604 	LIST_FOREACH(sc, &bridge_list, sc_list) {
1605 		BRIDGE_LOCK(sc);
1606 		LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1607 			if (ifp == bif->bif_ifp) {
1608 				bridge_delete_span(sc, bif);
1609 				break;
1610 			}
1611 
1612 		BRIDGE_UNLOCK(sc);
1613 	}
1614 	mtx_unlock(&bridge_list_mtx);
1615 }
1616 
1617 /*
1618  * bridge_init:
1619  *
1620  *	Initialize a bridge interface.
1621  */
1622 static void
1623 bridge_init(void *xsc)
1624 {
1625 	struct bridge_softc *sc = (struct bridge_softc *)xsc;
1626 	struct ifnet *ifp = sc->sc_ifp;
1627 
1628 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1629 		return;
1630 
1631 	BRIDGE_LOCK(sc);
1632 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1633 	    bridge_timer, sc);
1634 
1635 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1636 	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
1637 
1638 	BRIDGE_UNLOCK(sc);
1639 }
1640 
1641 /*
1642  * bridge_stop:
1643  *
1644  *	Stop the bridge interface.
1645  */
1646 static void
1647 bridge_stop(struct ifnet *ifp, int disable)
1648 {
1649 	struct bridge_softc *sc = ifp->if_softc;
1650 
1651 	BRIDGE_LOCK_ASSERT(sc);
1652 
1653 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1654 		return;
1655 
1656 	callout_stop(&sc->sc_brcallout);
1657 	bstp_stop(&sc->sc_stp);
1658 
1659 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1660 
1661 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1662 }
1663 
1664 /*
1665  * bridge_enqueue:
1666  *
1667  *	Enqueue a packet on a bridge member interface.
1668  *
1669  */
1670 static void
1671 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
1672 {
1673 	int len, err = 0;
1674 	short mflags;
1675 	struct mbuf *m0;
1676 
1677 	len = m->m_pkthdr.len;
1678 	mflags = m->m_flags;
1679 
1680 	/* We may be sending a fragment so traverse the mbuf */
1681 	for (; m; m = m0) {
1682 		m0 = m->m_nextpkt;
1683 		m->m_nextpkt = NULL;
1684 
1685 		 * If the underlying interface cannot insert the VLAN tag itself,
1686 		 * prepend an 802.1Q header to carry it.
1687 		 * then attach a packet tag that holds it.
1688 		 */
1689 		if ((m->m_flags & M_VLANTAG) &&
1690 		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
1691 			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
1692 			if (m == NULL) {
1693 				if_printf(dst_ifp,
1694 				    "unable to prepend VLAN header\n");
1695 				dst_ifp->if_oerrors++;
1696 				continue;
1697 			}
1698 			m->m_flags &= ~M_VLANTAG;
1699 		}
1700 
1701 		if (err == 0)
1702 			IFQ_ENQUEUE(&dst_ifp->if_snd, m, err);
1703 	}
1704 
1705 	if (err == 0) {
1706 
1707 		sc->sc_ifp->if_opackets++;
1708 		sc->sc_ifp->if_obytes += len;
1709 
1710 		dst_ifp->if_obytes += len;
1711 
1712 		if (mflags & M_MCAST) {
1713 			sc->sc_ifp->if_omcasts++;
1714 			dst_ifp->if_omcasts++;
1715 		}
1716 	}
1717 
1718 	if ((dst_ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
1719 		(*dst_ifp->if_start)(dst_ifp);
1720 }
1721 
1722 /*
1723  * bridge_dummynet:
1724  *
1725  * 	Receive a queued packet from dummynet and pass it on to the output
1726  * 	interface.
1727  *
1728  *	The mbuf has the Ethernet header already attached.
1729  */
1730 static void
1731 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
1732 {
1733 	struct bridge_softc *sc;
1734 
1735 	sc = ifp->if_bridge;
1736 
1737 	/*
1738 	 * The packet didn't originate from a member interface. This should only
1739 	 * ever happen if a member interface is removed while packets are
1740 	 * queued for it.
1741 	 */
1742 	if (sc == NULL) {
1743 		m_freem(m);
1744 		return;
1745 	}
1746 
1747 	if (PFIL_HOOKED(&inet_pfil_hook)
1748 #ifdef INET6
1749 	    || PFIL_HOOKED(&inet6_pfil_hook)
1750 #endif
1751 	    ) {
1752 		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
1753 			return;
1754 		if (m == NULL)
1755 			return;
1756 	}
1757 
1758 	bridge_enqueue(sc, ifp, m);
1759 }
1760 
1761 /*
1762  * bridge_output:
1763  *
1764  *	Send output from a bridge member interface.  This
1765  *	performs the bridging function for locally originated
1766  *	packets.
1767  *
1768  *	The mbuf has the Ethernet header already attached.  We must
1769  *	enqueue or free the mbuf before returning.
1770  */
1771 static int
1772 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
1773     struct rtentry *rt)
1774 {
1775 	struct ether_header *eh;
1776 	struct ifnet *dst_if;
1777 	struct bridge_softc *sc;
1778 	uint16_t vlan;
1779 
1780 	if (m->m_len < ETHER_HDR_LEN) {
1781 		m = m_pullup(m, ETHER_HDR_LEN);
1782 		if (m == NULL)
1783 			return (0);
1784 	}
1785 
1786 	eh = mtod(m, struct ether_header *);
1787 	sc = ifp->if_bridge;
1788 	vlan = VLANTAGOF(m);
1789 
1790 	BRIDGE_LOCK(sc);
1791 
1792 	/*
1793 	 * If bridge is down, but the original output interface is up,
1794 	 * go ahead and send out that interface.  Otherwise, the packet
1795 	 * is dropped below.
1796 	 */
1797 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1798 		dst_if = ifp;
1799 		goto sendunicast;
1800 	}
1801 
1802 	/*
1803 	 * If the packet is a multicast, or we don't know a better way to
1804 	 * get there, send to all interfaces.
1805 	 */
1806 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
1807 		dst_if = NULL;
1808 	else
1809 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
1810 	if (dst_if == NULL) {
1811 		struct bridge_iflist *bif;
1812 		struct mbuf *mc;
1813 		int error = 0, used = 0;
1814 
1815 		bridge_span(sc, m);
1816 
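		/*
		 * Trade the bridge lock for a list reference so the member
		 * list stays stable while frames are queued without the
		 * mutex held.
		 */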
1817 		BRIDGE_LOCK2REF(sc, error);
1818 		if (error) {
1819 			m_freem(m);
1820 			return (0);
1821 		}
1822 
1823 		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1824 			dst_if = bif->bif_ifp;
1825 
1826 			if (dst_if->if_type == IFT_GIF)
1827 				continue;
1828 			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
1829 				continue;
1830 
1831 			/*
1832 			 * If this is not the original output interface,
1833 			 * and the interface is participating in spanning
1834 			 * tree, make sure the port is in a state that
1835 			 * allows forwarding.
1836 			 */
1837 			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
1838 			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
1839 				continue;
1840 
1841 			if (LIST_NEXT(bif, bif_next) == NULL) {
1842 				used = 1;
1843 				mc = m;
1844 			} else {
1845 				mc = m_copypacket(m, M_DONTWAIT);
1846 				if (mc == NULL) {
1847 					sc->sc_ifp->if_oerrors++;
1848 					continue;
1849 				}
1850 			}
1851 
1852 			bridge_enqueue(sc, dst_if, mc);
1853 		}
1854 		if (used == 0)
1855 			m_freem(m);
1856 		BRIDGE_UNREF(sc);
1857 		return (0);
1858 	}
1859 
1860 sendunicast:
1861 	/*
1862 	 * XXX Spanning tree consideration here?
1863 	 */
1864 
1865 	bridge_span(sc, m);
1866 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1867 		m_freem(m);
1868 		BRIDGE_UNLOCK(sc);
1869 		return (0);
1870 	}
1871 
1872 	BRIDGE_UNLOCK(sc);
1873 	bridge_enqueue(sc, dst_if, m);
1874 	return (0);
1875 }
1876 
1877 /*
1878  * bridge_start:
1879  *
1880  *	Start output on a bridge.
1881  *
1882  */
1883 static void
1884 bridge_start(struct ifnet *ifp)
1885 {
1886 	struct bridge_softc *sc;
1887 	struct mbuf *m;
1888 	struct ether_header *eh;
1889 	struct ifnet *dst_if;
1890 
1891 	sc = ifp->if_softc;
1892 
1893 	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1894 	for (;;) {
1895 		IFQ_DEQUEUE(&ifp->if_snd, m);
1896 		if (m == NULL)
1897 			break;
1898 		ETHER_BPF_MTAP(ifp, m);
1899 
1900 		eh = mtod(m, struct ether_header *);
1901 		dst_if = NULL;
1902 
1903 		BRIDGE_LOCK(sc);
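		/*
		 * Locally originated traffic is looked up on the default
		 * VLAN (1).
		 */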
1904 		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1905 			dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1);
1906 		}
1907 
1908 		if (dst_if == NULL)
1909 			bridge_broadcast(sc, ifp, m, 0);
1910 		else {
1911 			BRIDGE_UNLOCK(sc);
1912 			bridge_enqueue(sc, dst_if, m);
1913 		}
1914 	}
1915 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1916 }
1917 
1918 /*
1919  * bridge_forward:
1920  *
1921  *	The forwarding function of the bridge.
1922  *
1923  *	NOTE: Releases the lock on return.
1924  */
1925 static void
1926 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
1927     struct mbuf *m)
1928 {
1929 	struct bridge_iflist *dbif;
1930 	struct ifnet *src_if, *dst_if, *ifp;
1931 	struct ether_header *eh;
1932 	uint16_t vlan;
1933 	int error;
1934 
1935 	src_if = m->m_pkthdr.rcvif;
1936 	ifp = sc->sc_ifp;
1937 
1938 	sc->sc_ifp->if_ipackets++;
1939 	sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
1940 	vlan = VLANTAGOF(m);
1941 
1942 	if ((sbif->bif_flags & IFBIF_STP) &&
1943 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
1944 		BRIDGE_UNLOCK(sc);
1945 		m_freem(m);
1946 		return;
1947 	}
1948 
1949 	eh = mtod(m, struct ether_header *);
1950 
1951 	/* If the interface is learning, record the address. */
1952 	if (sbif->bif_flags & IFBIF_LEARNING) {
1953 		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
1954 		    sbif, 0, IFBAF_DYNAMIC);
1955 		/*
1956 		 * If the interface has addresses limits then deny any source
1957 		 * that is not in the cache.
1958 		 */
1959 		if (error && sbif->bif_addrmax) {
1960 			BRIDGE_UNLOCK(sc);
1961 			m_freem(m);
1962 			return;
1963 		}
1964 	}
1965 
1966 	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
1967 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) {
1968 		m_freem(m);
1969 		BRIDGE_UNLOCK(sc);
1970 		return;
1971 	}
1972 
1973 	/*
1974 	 * At this point, the port either doesn't participate
1975 	 * in spanning tree or it is in the forwarding state.
1976 	 */
1977 
1978 	/*
1979 	 * If the packet is unicast, destined for someone on
1980 	 * "this" side of the bridge, drop it.
1981 	 */
1982 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1983 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
1984 		if (src_if == dst_if) {
1985 			BRIDGE_UNLOCK(sc);
1986 			m_freem(m);
1987 			return;
1988 		}
1989 	} else {
1990 		/* ...forward it to all interfaces. */
1991 		sc->sc_ifp->if_imcasts++;
1992 		dst_if = NULL;
1993 	}
1994 
1995 	/*
1996 	 * If we have a destination interface which is a member of our bridge,
1997 	 * OR this is a unicast packet, push it through the bpf(4) machinery.
1998 	 * For broadcast or multicast packets, don't bother because it will
1999 	 * be reinjected into ether_input. We do this before we pass the packets
2000 	 * through the pfil(9) framework, as it is possible that pfil(9) will
2001 	 * drop the packet, or possibly modify it, making it difficult to debug
2002 	 * firewall issues on the bridge.
2003 	 */
2004 	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2005 		ETHER_BPF_MTAP(ifp, m);
2006 
2007 	/* run the packet filter */
2008 	if (PFIL_HOOKED(&inet_pfil_hook)
2009 #ifdef INET6
2010 	    || PFIL_HOOKED(&inet6_pfil_hook)
2011 #endif
2012 	    ) {
2013 		BRIDGE_UNLOCK(sc);
2014 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2015 			return;
2016 		if (m == NULL)
2017 			return;
2018 		BRIDGE_LOCK(sc);
2019 	}
2020 
2021 	if (dst_if == NULL) {
2022 		bridge_broadcast(sc, src_if, m, 1);
2023 		return;
2024 	}
2025 
2026 	/*
2027 	 * At this point, we're dealing with a unicast frame
2028 	 * going to a different interface.
2029 	 */
2030 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2031 		BRIDGE_UNLOCK(sc);
2032 		m_freem(m);
2033 		return;
2034 	}
2035 	dbif = bridge_lookup_member_if(sc, dst_if);
2036 	if (dbif == NULL) {
2037 		/* Not a member of the bridge (anymore?) */
2038 		BRIDGE_UNLOCK(sc);
2039 		m_freem(m);
2040 		return;
2041 	}
2042 
2043 	/* Private segments cannot talk to each other */
2044 	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE) {
2045 		BRIDGE_UNLOCK(sc);
2046 		m_freem(m);
2047 		return;
2048 	}
2049 
2050 	if ((dbif->bif_flags & IFBIF_STP) &&
2051 	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2052 		BRIDGE_UNLOCK(sc);
2053 		m_freem(m);
2054 		return;
2055 	}
2056 
2057 	BRIDGE_UNLOCK(sc);
2058 
2059 	if (PFIL_HOOKED(&inet_pfil_hook)
2060 #ifdef INET6
2061 	    || PFIL_HOOKED(&inet6_pfil_hook)
2062 #endif
2063 	    ) {
2064 		if (bridge_pfil(&m, sc->sc_ifp, dst_if, PFIL_OUT) != 0)
2065 			return;
2066 		if (m == NULL)
2067 			return;
2068 	}
2069 
2070 	bridge_enqueue(sc, dst_if, m);
2071 }
2072 
2073 /*
2074  * bridge_input:
2075  *
2076  *	Receive input from a member interface.  Queue the packet for
2077  *	bridging if it is not for us.
2078  */
2079 static struct mbuf *
2080 bridge_input(struct ifnet *ifp, struct mbuf *m)
2081 {
2082 	struct bridge_softc *sc = ifp->if_bridge;
2083 	struct bridge_iflist *bif, *bif2;
2084 	struct ifnet *bifp;
2085 	struct ether_header *eh;
2086 	struct mbuf *mc, *mc2;
2087 	uint16_t vlan;
2088 	int error;
2089 
2090 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2091 		return (m);
2092 
2093 	bifp = sc->sc_ifp;
2094 	vlan = VLANTAGOF(m);
2095 
2096 	/*
2097 	 * Implement support for bridge monitoring. If this flag has been
2098 	 * set on this interface, discard the packet once we push it through
2099 	 * the bpf(4) machinery, but before we do, increment the byte and
2100 	 * packet counters associated with this interface.
2101 	 */
2102 	if ((bifp->if_flags & IFF_MONITOR) != 0) {
2103 		m->m_pkthdr.rcvif = bifp;
2104 		ETHER_BPF_MTAP(bifp, m);
2105 		bifp->if_ipackets++;
2106 		bifp->if_ibytes += m->m_pkthdr.len;
2107 		m_freem(m);
2108 		return (NULL);
2109 	}
2110 	BRIDGE_LOCK(sc);
2111 	bif = bridge_lookup_member_if(sc, ifp);
2112 	if (bif == NULL) {
2113 		BRIDGE_UNLOCK(sc);
2114 		return (m);
2115 	}
2116 
2117 	eh = mtod(m, struct ether_header *);
2118 
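	/* The frame is addressed to the bridge interface's own MAC address. */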
2119 	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp),
2120 	    ETHER_ADDR_LEN) == 0) {
2121 		/*
2122 		 * Filter on the physical interface.
2123 		 */
2124 		if (pfil_local_phys && (PFIL_HOOKED(&inet_pfil_hook)
2125 #ifdef INET6
2126 		    || PFIL_HOOKED(&inet6_pfil_hook)
2127 #endif
2128 		    )) {
2129 			if (bridge_pfil(&m, NULL, ifp, PFIL_IN) != 0 ||
2130 			    m == NULL) {
2131 				BRIDGE_UNLOCK(sc);
2132 				return (NULL);
2133 			}
2134 		}
2135 
2136 		/*
2137 		 * If the packet is for us, set the packet's source as the
2138 		 * bridge, and return the packet back to ether_input for
2139 		 * local processing.
2140 		 */
2141 
2142 		/* Note where to send the reply to */
2143 		if (bif->bif_flags & IFBIF_LEARNING) {
2144 			error = bridge_rtupdate(sc,
2145 			    eh->ether_shost, vlan, bif, 0, IFBAF_DYNAMIC);
2146 			/*
2147 			 * If the interface has addresses limits then deny any
2148 			 * source that is not in the cache.
2149 			 */
2150 			if (error && bif->bif_addrmax) {
2151 				BRIDGE_UNLOCK(sc);
2152 				m_freem(m);
2153 				return (NULL);
2154 			}
2155 		}
2156 
2157 		/* Mark the packet as arriving on the bridge interface */
2158 		m->m_pkthdr.rcvif = bifp;
2159 		ETHER_BPF_MTAP(bifp, m);
2160 		bifp->if_ipackets++;
2161 
2162 		BRIDGE_UNLOCK(sc);
2163 		return (m);
2164 	}
2165 
2166 	bridge_span(sc, m);
2167 
2168 	if (m->m_flags & (M_BCAST|M_MCAST)) {
2169 		/* Tap off 802.1D packets; they do not get forwarded. */
2170 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2171 		    ETHER_ADDR_LEN) == 0) {
2172 			m = bstp_input(&bif->bif_stp, ifp, m);
2173 			if (m == NULL) {
2174 				BRIDGE_UNLOCK(sc);
2175 				return (NULL);
2176 			}
2177 		}
2178 
2179 		if ((bif->bif_flags & IFBIF_STP) &&
2180 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2181 			BRIDGE_UNLOCK(sc);
2182 			return (m);
2183 		}
2184 
2185 		/*
2186 		 * Make a deep copy of the packet and enqueue the copy
2187 		 * for bridge processing; return the original packet for
2188 		 * local processing.
2189 		 */
2190 		mc = m_dup(m, M_DONTWAIT);
2191 		if (mc == NULL) {
2192 			BRIDGE_UNLOCK(sc);
2193 			return (m);
2194 		}
2195 
2196 		/* Perform the bridge forwarding function with the copy. */
2197 		bridge_forward(sc, bif, mc);
2198 
2199 		/*
2200 		 * Reinject the mbuf as arriving on the bridge so we have a
2201 		 * chance at claiming multicast packets.  We cannot loop back
2202 		 * here from ether_input as a bridge is never a member of a
2203 		 * bridge.
2204 		 */
2205 		KASSERT(bifp->if_bridge == NULL,
2206 		    ("loop created in bridge_input"));
2207 		mc2 = m_dup(m, M_DONTWAIT);
2208 		if (mc2 != NULL) {
2209 			/* Keep the layer3 header aligned */
2210 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2211 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2212 		}
2213 		if (mc2 != NULL) {
2214 			mc2->m_pkthdr.rcvif = bifp;
2215 			(*bifp->if_input)(bifp, mc2);
2216 		}
2217 
2218 		/* Return the original packet for local processing. */
2219 		return (m);
2220 	}
2221 
2222 	if ((bif->bif_flags & IFBIF_STP) &&
2223 	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2224 		BRIDGE_UNLOCK(sc);
2225 		return (m);
2226 	}
2227 
2228 #ifdef DEV_CARP
2229 #   define OR_CARP_CHECK_WE_ARE_DST(iface) \
2230 	|| ((iface)->if_carp \
2231 	    && carp_forus((iface)->if_carp, eh->ether_dhost))
2232 #   define OR_CARP_CHECK_WE_ARE_SRC(iface) \
2233 	|| ((iface)->if_carp \
2234 	    && carp_forus((iface)->if_carp, eh->ether_shost))
2235 #else
2236 #   define OR_CARP_CHECK_WE_ARE_DST(iface)
2237 #   define OR_CARP_CHECK_WE_ARE_SRC(iface)
2238 #endif
2239 
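/*
 * GRAB_OUR_PACKETS(iface) checks one candidate interface: if the frame is
 * addressed to that interface (or one of its CARP addresses), the source is
 * learned (when learning is enabled) and the mbuf is returned for local
 * processing; if the frame was sourced by that interface (or one of its
 * CARP addresses), it is our own reflection and is dropped.  gif(4) members
 * are skipped.
 */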
2240 #define GRAB_OUR_PACKETS(iface) \
2241 	if ((iface)->if_type == IFT_GIF) \
2242 		continue; \
2243 	/* It is destined for us. */ \
2244 	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost,  ETHER_ADDR_LEN) == 0 \
2245 	    OR_CARP_CHECK_WE_ARE_DST((iface))				\
2246 	    ) {								\
2247 		if (bif->bif_flags & IFBIF_LEARNING) {			\
2248 			error = bridge_rtupdate(sc, eh->ether_shost,	\
2249 			    vlan, bif, 0, IFBAF_DYNAMIC);		\
2250 			if (error && bif->bif_addrmax) {		\
2251 				BRIDGE_UNLOCK(sc);			\
2252 				m_freem(m);				\
2253 				return (NULL);				\
2254 			}						\
2255 		}							\
2256 		m->m_pkthdr.rcvif = iface;				\
2257 		BRIDGE_UNLOCK(sc);					\
2258 		return (m);						\
2259 	}								\
2260 									\
2261 	/* We just received a packet that we sent out. */		\
2262 	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
2263 	    OR_CARP_CHECK_WE_ARE_SRC((iface))			\
2264 	    ) {								\
2265 		BRIDGE_UNLOCK(sc);					\
2266 		m_freem(m);						\
2267 		return (NULL);						\
2268 	}
2269 
2270 	/*
2271 	 * Unicast.  Make sure it's not for us.
2272 	 *
2273 	 * Check ifp first, giving it priority.  This helps when the packet
2274 	 * arrives through an interface such as a vlan(4) that shares a MAC
2275 	 * address with other members of the same bridge.  It also saves a
2276 	 * few CPU cycles when the destination interface and the input
2277 	 * interface (i.e. ifp) are the same.
2278 	 */
2279 	do { GRAB_OUR_PACKETS(ifp) } while (0);
2280 
2281 	/* Now check all the bridge members. */
2282 	LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2283 		GRAB_OUR_PACKETS(bif2->bif_ifp)
2284 	}
2285 
2286 #undef OR_CARP_CHECK_WE_ARE_DST
2287 #undef OR_CARP_CHECK_WE_ARE_SRC
2288 #undef GRAB_OUR_PACKETS
2289 
2290 	/* Perform the bridge forwarding function. */
2291 	bridge_forward(sc, bif, m);
2292 
2293 	return (NULL);
2294 }
2295 
2296 /*
2297  * bridge_broadcast:
2298  *
2299  *	Send a frame to all interfaces that are members of
2300  *	the bridge, except for the one on which the packet
2301  *	arrived.
2302  *
2303  *	NOTE: Releases the lock on return.
2304  */
2305 static void
2306 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2307     struct mbuf *m, int runfilt)
2308 {
2309 	struct bridge_iflist *dbif, *sbif;
2310 	struct mbuf *mc;
2311 	struct ifnet *dst_if;
2312 	int error = 0, used = 0, i;
2313 
2314 	sbif = bridge_lookup_member_if(sc, src_if);
2315 
2316 	BRIDGE_LOCK2REF(sc, error);
2317 	if (error) {
2318 		m_freem(m);
2319 		return;
2320 	}
2321 
2322 	/* Filter on the bridge interface before broadcasting */
2323 	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
2324 #ifdef INET6
2325 	    || PFIL_HOOKED(&inet6_pfil_hook)
2326 #endif
2327 	    )) {
2328 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2329 			goto out;
2330 		if (m == NULL)
2331 			goto out;
2332 	}
2333 
2334 	LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2335 		dst_if = dbif->bif_ifp;
2336 		if (dst_if == src_if)
2337 			continue;
2338 
2339 		/* Private segments cannot talk to each other */
2340 		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2341 			continue;
2342 
2343 		if ((dbif->bif_flags & IFBIF_STP) &&
2344 		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2345 			continue;
2346 
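		/*
		 * Only flood unknown unicast out of members that have
		 * "discover" enabled; broadcast and multicast frames are
		 * not subject to this check.
		 */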
2347 		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2348 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2349 			continue;
2350 
2351 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2352 			continue;
2353 
2354 		if (LIST_NEXT(dbif, bif_next) == NULL) {
2355 			mc = m;
2356 			used = 1;
2357 		} else {
2358 			mc = m_dup(m, M_DONTWAIT);
2359 			if (mc == NULL) {
2360 				sc->sc_ifp->if_oerrors++;
2361 				continue;
2362 			}
2363 		}
2364 
2365 		/*
2366 		 * Filter on the output interface. Pass a NULL bridge interface
2367 		 * pointer so we do not redundantly filter on the bridge for
2368 		 * each interface we broadcast on.
2369 		 */
2370 		if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
2371 #ifdef INET6
2372 		    || PFIL_HOOKED(&inet6_pfil_hook)
2373 #endif
2374 		    )) {
2375 			if (used == 0) {
2376 				/* Keep the layer3 header aligned */
2377 				i = min(mc->m_pkthdr.len, max_protohdr);
2378 				mc = m_copyup(mc, i, ETHER_ALIGN);
2379 				if (mc == NULL) {
2380 					sc->sc_ifp->if_oerrors++;
2381 					continue;
2382 				}
2383 			}
2384 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2385 				continue;
2386 			if (mc == NULL)
2387 				continue;
2388 		}
2389 
2390 		bridge_enqueue(sc, dst_if, mc);
2391 	}
2392 	if (used == 0)
2393 		m_freem(m);
2394 
2395 out:
2396 	BRIDGE_UNREF(sc);
2397 }
2398 
2399 /*
2400  * bridge_span:
2401  *
2402  *	Duplicate a packet out one or more interfaces that are in span mode;
2403  *	the original mbuf is left unmodified.
2404  */
2405 static void
2406 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2407 {
2408 	struct bridge_iflist *bif;
2409 	struct ifnet *dst_if;
2410 	struct mbuf *mc;
2411 
2412 	if (LIST_EMPTY(&sc->sc_spanlist))
2413 		return;
2414 
2415 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2416 		dst_if = bif->bif_ifp;
2417 
2418 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2419 			continue;
2420 
2421 		mc = m_copypacket(m, M_DONTWAIT);
2422 		if (mc == NULL) {
2423 			sc->sc_ifp->if_oerrors++;
2424 			continue;
2425 		}
2426 
2427 		bridge_enqueue(sc, dst_if, mc);
2428 	}
2429 }
2430 
2431 /*
2432  * bridge_rtupdate:
2433  *
2434  *	Add a bridge routing entry.
2435  */
2436 static int
2437 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
2438     struct bridge_iflist *bif, int setflags, uint8_t flags)
2439 {
2440 	struct bridge_rtnode *brt;
2441 	int error;
2442 
2443 	BRIDGE_LOCK_ASSERT(sc);
2444 
2445 	/* Check that the source address is valid and not multicast. */
2446 	if (ETHER_IS_MULTICAST(dst) ||
2447 	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
2448 	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0))
2449 		return (EINVAL);
2450 
2451 	/* 802.1p frames map to vlan 1 */
2452 	if (vlan == 0)
2453 		vlan = 1;
2454 
2455 	/*
2456 	 * A route for this destination might already exist.  If so,
2457 	 * update it, otherwise create a new one.
2458 	 */
2459 	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2460 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2461 			sc->sc_brtexceeded++;
2462 			return (ENOSPC);
2463 		}
2464 		/* Check per interface address limits (if enabled) */
2465 		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2466 			bif->bif_addrexceeded++;
2467 			return (ENOSPC);
2468 		}
2469 
2470 		/*
2471 		 * Allocate a new bridge forwarding node, and
2472 		 * initialize the expiration time and Ethernet
2473 		 * address.
2474 		 */
2475 		brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2476 		if (brt == NULL)
2477 			return (ENOMEM);
2478 
2479 		if (bif->bif_flags & IFBIF_STICKY)
2480 			brt->brt_flags = IFBAF_STICKY;
2481 		else
2482 			brt->brt_flags = IFBAF_DYNAMIC;
2483 
2484 		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2485 		brt->brt_vlan = vlan;
2486 
2487 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2488 			uma_zfree(bridge_rtnode_zone, brt);
2489 			return (error);
2490 		}
2491 		brt->brt_dst = bif;
2492 		bif->bif_addrcnt++;
2493 	}
2494 
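	/*
	 * If a dynamic entry has moved to a different member interface,
	 * repoint it and fix up the per-interface address counts.
	 */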
2495 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2496 	    brt->brt_dst != bif) {
2497 		brt->brt_dst->bif_addrcnt--;
2498 		brt->brt_dst = bif;
2499 		brt->brt_dst->bif_addrcnt++;
2500 	}
2501 
2502 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2503 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
2504 	if (setflags)
2505 		brt->brt_flags = flags;
2506 
2507 	return (0);
2508 }
2509 
2510 /*
2511  * bridge_rtlookup:
2512  *
2513  *	Lookup the destination interface for an address.
2514  */
2515 static struct ifnet *
2516 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2517 {
2518 	struct bridge_rtnode *brt;
2519 
2520 	BRIDGE_LOCK_ASSERT(sc);
2521 
2522 	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
2523 		return (NULL);
2524 
2525 	return (brt->brt_ifp);
2526 }
2527 
2528 /*
2529  * bridge_rttrim:
2530  *
2531  *	Trim the routing table so that we have a number
2532  *	of routing entries less than or equal to the
2533  *	maximum number.
2534  */
2535 static void
2536 bridge_rttrim(struct bridge_softc *sc)
2537 {
2538 	struct bridge_rtnode *brt, *nbrt;
2539 
2540 	BRIDGE_LOCK_ASSERT(sc);
2541 
2542 	/* Make sure we actually need to do this. */
2543 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2544 		return;
2545 
2546 	/* Force an aging cycle; this might trim enough addresses. */
2547 	bridge_rtage(sc);
2548 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2549 		return;
2550 
2551 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2552 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2553 			bridge_rtnode_destroy(sc, brt);
2554 			if (sc->sc_brtcnt <= sc->sc_brtmax)
2555 				return;
2556 		}
2557 	}
2558 }
2559 
2560 /*
2561  * bridge_timer:
2562  *
2563  *	Aging timer for the bridge.
2564  */
2565 static void
2566 bridge_timer(void *arg)
2567 {
2568 	struct bridge_softc *sc = arg;
2569 
2570 	BRIDGE_LOCK_ASSERT(sc);
2571 
2572 	bridge_rtage(sc);
2573 
2574 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
2575 		callout_reset(&sc->sc_brcallout,
2576 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2577 }
2578 
2579 /*
2580  * bridge_rtage:
2581  *
2582  *	Perform an aging cycle.
2583  */
2584 static void
2585 bridge_rtage(struct bridge_softc *sc)
2586 {
2587 	struct bridge_rtnode *brt, *nbrt;
2588 
2589 	BRIDGE_LOCK_ASSERT(sc);
2590 
2591 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2592 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2593 			if (time_uptime >= brt->brt_expire)
2594 				bridge_rtnode_destroy(sc, brt);
2595 		}
2596 	}
2597 }
2598 
2599 /*
2600  * bridge_rtflush:
2601  *
2602  *	Remove all dynamic addresses from the bridge.
2603  */
2604 static void
2605 bridge_rtflush(struct bridge_softc *sc, int full)
2606 {
2607 	struct bridge_rtnode *brt, *nbrt;
2608 
2609 	BRIDGE_LOCK_ASSERT(sc);
2610 
2611 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2612 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2613 			bridge_rtnode_destroy(sc, brt);
2614 	}
2615 }
2616 
2617 /*
2618  * bridge_rtdaddr:
2619  *
2620  *	Remove an address from the table.
2621  */
2622 static int
2623 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2624 {
2625 	struct bridge_rtnode *brt;
2626 	int found = 0;
2627 
2628 	BRIDGE_LOCK_ASSERT(sc);
2629 
2630 	/*
2631 	 * If vlan is zero then we want to delete the address for all vlans,
2632 	 * so the lookup may return more than one entry.
2633 	 */
2634 	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
2635 		bridge_rtnode_destroy(sc, brt);
2636 		found = 1;
2637 	}
2638 
2639 	return (found ? 0 : ENOENT);
2640 }
2641 
2642 /*
2643  * bridge_rtdelete:
2644  *
2645  *	Delete routes to a specific member interface.
2646  */
2647 static void
2648 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2649 {
2650 	struct bridge_rtnode *brt, *nbrt;
2651 
2652 	BRIDGE_LOCK_ASSERT(sc);
2653 
2654 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2655 		if (brt->brt_ifp == ifp && (full ||
2656 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2657 			bridge_rtnode_destroy(sc, brt);
2658 	}
2659 }
2660 
2661 /*
2662  * bridge_rtable_init:
2663  *
2664  *	Initialize the route table for this bridge.
2665  */
2666 static int
2667 bridge_rtable_init(struct bridge_softc *sc)
2668 {
2669 	int i;
2670 
2671 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2672 	    M_DEVBUF, M_NOWAIT);
2673 	if (sc->sc_rthash == NULL)
2674 		return (ENOMEM);
2675 
2676 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2677 		LIST_INIT(&sc->sc_rthash[i]);
2678 
2679 	sc->sc_rthash_key = arc4random();
2680 
2681 	LIST_INIT(&sc->sc_rtlist);
2682 
2683 	return (0);
2684 }
2685 
2686 /*
2687  * bridge_rtable_fini:
2688  *
2689  *	Deconstruct the route table for this bridge.
2690  */
2691 static void
2692 bridge_rtable_fini(struct bridge_softc *sc)
2693 {
2694 
2695 	KASSERT(sc->sc_brtcnt == 0,
2696 	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
2697 	free(sc->sc_rthash, M_DEVBUF);
2698 }
2699 
2700 /*
2701  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2702  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2703  */
2704 #define	mix(a, b, c)							\
2705 do {									\
2706 	a -= b; a -= c; a ^= (c >> 13);					\
2707 	b -= c; b -= a; b ^= (a << 8);					\
2708 	c -= a; c -= b; c ^= (b >> 13);					\
2709 	a -= b; a -= c; a ^= (c >> 12);					\
2710 	b -= c; b -= a; b ^= (a << 16);					\
2711 	c -= a; c -= b; c ^= (b >> 5);					\
2712 	a -= b; a -= c; a ^= (c >> 3);					\
2713 	b -= c; b -= a; b ^= (a << 10);					\
2714 	c -= a; c -= b; c ^= (b >> 15);					\
2715 } while (/*CONSTCOND*/0)
2716 
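/*
 * bridge_rthash:
 *
 *	Hash an Ethernet address into the route table.  The per-bridge random
 *	key (sc_rthash_key) is mixed in so that chain placement cannot easily
 *	be predicted or forced by a remote sender.
 */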
2717 static __inline uint32_t
2718 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2719 {
2720 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2721 
2722 	b += addr[5] << 8;
2723 	b += addr[4];
2724 	a += addr[3] << 24;
2725 	a += addr[2] << 16;
2726 	a += addr[1] << 8;
2727 	a += addr[0];
2728 
2729 	mix(a, b, c);
2730 
2731 	return (c & BRIDGE_RTHASH_MASK);
2732 }
2733 
2734 #undef mix
2735 
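/*
 * bridge_rtnode_addr_cmp:
 *
 *	Compare two Ethernet addresses byte by byte, returning a negative,
 *	zero or positive value in the style of memcmp().  The hash chains
 *	are kept sorted using this ordering.
 */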
2736 static int
2737 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
2738 {
2739 	int i, d;
2740 
2741 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
2742 		d = ((int)a[i]) - ((int)b[i]);
2743 	}
2744 
2745 	return (d);
2746 }
2747 
2748 /*
2749  * bridge_rtnode_lookup:
2750  *
2751  *	Look up a bridge route node for the specified destination.  Compare
2752  *	the vlan id, or if it is zero, just return the first match.
2753  */
2754 static struct bridge_rtnode *
2755 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2756 {
2757 	struct bridge_rtnode *brt;
2758 	uint32_t hash;
2759 	int dir;
2760 
2761 	BRIDGE_LOCK_ASSERT(sc);
2762 
2763 	hash = bridge_rthash(sc, addr);
2764 	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2765 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
2766 		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
2767 			return (brt);
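		/* Chains are kept sorted by address, so we can stop early. */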
2768 		if (dir > 0)
2769 			return (NULL);
2770 	}
2771 
2772 	return (NULL);
2773 }
2774 
2775 /*
2776  * bridge_rtnode_insert:
2777  *
2778  *	Insert the specified bridge node into the route table.  We
2779  *	assume the entry is not already in the table.
2780  */
2781 static int
2782 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2783 {
2784 	struct bridge_rtnode *lbrt;
2785 	uint32_t hash;
2786 	int dir;
2787 
2788 	BRIDGE_LOCK_ASSERT(sc);
2789 
2790 	hash = bridge_rthash(sc, brt->brt_addr);
2791 
2792 	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2793 	if (lbrt == NULL) {
2794 		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2795 		goto out;
2796 	}
2797 
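	/*
	 * Walk the hash chain, which is kept sorted by address, and insert
	 * the new node in order; a node with the same address and vlan
	 * already present is an error.
	 */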
2798 	do {
2799 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
2800 		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
2801 			return (EEXIST);
2802 		if (dir > 0) {
2803 			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2804 			goto out;
2805 		}
2806 		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2807 			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2808 			goto out;
2809 		}
2810 		lbrt = LIST_NEXT(lbrt, brt_hash);
2811 	} while (lbrt != NULL);
2812 
2813 #ifdef DIAGNOSTIC
2814 	panic("bridge_rtnode_insert: impossible");
2815 #endif
2816 
2817 out:
2818 	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2819 	sc->sc_brtcnt++;
2820 
2821 	return (0);
2822 }
2823 
2824 /*
2825  * bridge_rtnode_destroy:
2826  *
2827  *	Destroy a bridge rtnode.
2828  */
2829 static void
2830 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
2831 {
2832 	BRIDGE_LOCK_ASSERT(sc);
2833 
2834 	LIST_REMOVE(brt, brt_hash);
2835 
2836 	LIST_REMOVE(brt, brt_list);
2837 	sc->sc_brtcnt--;
2838 	brt->brt_dst->bif_addrcnt--;
2839 	uma_zfree(bridge_rtnode_zone, brt);
2840 }
2841 
2842 /*
2843  * bridge_rtable_expire:
2844  *
2845  *	Set the expiry time for all routes on an interface.
2846  */
2847 static void
2848 bridge_rtable_expire(struct ifnet *ifp, int age)
2849 {
2850 	struct bridge_softc *sc = ifp->if_bridge;
2851 	struct bridge_rtnode *brt;
2852 
2853 	BRIDGE_LOCK(sc);
2854 
2855 	/*
2856 	 * If the age is zero then flush, otherwise cap the expiry time at
2857 	 * 'age' seconds for every dynamic entry on the interface.
2858 	 */
2859 	if (age == 0)
2860 		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
2861 	else {
2862 		LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
2863 			/* Cap the expiry time to 'age' */
2864 			if (brt->brt_ifp == ifp &&
2865 			    brt->brt_expire > time_uptime + age &&
2866 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2867 				brt->brt_expire = time_uptime + age;
2868 		}
2869 	}
2870 	BRIDGE_UNLOCK(sc);
2871 }
2872 
2873 /*
2874  * bridge_state_change:
2875  *
2876  *	Callback from the bridgestp code when a port changes states.
2877  */
2878 static void
2879 bridge_state_change(struct ifnet *ifp, int state)
2880 {
2881 	struct bridge_softc *sc = ifp->if_bridge;
2882 	static const char *stpstates[] = {
2883 		"disabled",
2884 		"listening",
2885 		"learning",
2886 		"forwarding",
2887 		"blocking",
2888 		"discarding"
2889 	};
2890 
2891 	if (log_stp)
2892 		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
2893 		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
2894 }
2895 
2896 /*
2897  * Send bridge packets through pfil if they are one of the types pfil can deal
2898  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
2899  * question.)  If bifp or ifp is NULL then packet filtering is skipped for
2900  * that interface.
2901  */
2902 static int
2903 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
2904 {
2905 	int snap, error, i, hlen;
2906 	struct ether_header *eh1, eh2;
2907 	struct ip_fw_args args;
2908 	struct ip *ip;
2909 	struct llc llc1;
2910 	u_int16_t ether_type;
2911 
2912 	snap = 0;
2913 	error = -1;	/* Default to an error unless explicitly cleared below */
2914 
2915 #if 0
2916 	/* we may return with the IP fields swapped, ensure it's not shared */
2917 	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
2918 #endif
2919 
2920 	if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
2921 		return (0); /* filtering is disabled */
2922 
2923 	i = min((*mp)->m_pkthdr.len, max_protohdr);
2924 	if ((*mp)->m_len < i) {
2925 	    *mp = m_pullup(*mp, i);
2926 	    if (*mp == NULL) {
2927 		printf("%s: m_pullup failed\n", __func__);
2928 		return (-1);
2929 	    }
2930 	}
2931 
2932 	eh1 = mtod(*mp, struct ether_header *);
2933 	ether_type = ntohs(eh1->ether_type);
2934 
2935 	/*
2936 	 * Check for SNAP/LLC.
2937 	 */
2938 	if (ether_type < ETHERMTU) {
2939 		struct llc *llc2 = (struct llc *)(eh1 + 1);
2940 
2941 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2942 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
2943 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
2944 		    llc2->llc_control == LLC_UI) {
2945 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
2946 			snap = 1;
2947 		}
2948 	}
2949 
2950 	/*
2951 	 * If we're trying to filter bridge traffic, don't look at anything
2952 	 * other than IP and ARP traffic.  If the filter doesn't understand
2953 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
2954 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2955 	 * but of course we don't have an AppleTalk filter to begin with.
2956 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
2957 	 * ARP traffic.)
2958 	 */
2959 	switch (ether_type) {
2960 		case ETHERTYPE_ARP:
2961 		case ETHERTYPE_REVARP:
2962 			if (pfil_ipfw_arp == 0)
2963 				return (0); /* Automatically pass */
2964 			break;
2965 
2966 		case ETHERTYPE_IP:
2967 #ifdef INET6
2968 		case ETHERTYPE_IPV6:
2969 #endif /* INET6 */
2970 			break;
2971 		default:
2972 			/*
2973 			 * Check to see if the user wants to pass non-IP
2974 			 * packets; these are not checked by pfil(9) and would
2975 			 * be passed unconditionally, so the default is to drop.
2976 			 */
2977 			if (pfil_onlyip)
2978 				goto bad;
2979 	}
2980 
2981 	/* Strip off the Ethernet header and keep a copy. */
2982 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
2983 	m_adj(*mp, ETHER_HDR_LEN);
2984 
2985 	/* Strip off snap header, if present */
2986 	if (snap) {
2987 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
2988 		m_adj(*mp, sizeof(struct llc));
2989 	}
2990 
2991 	/*
2992 	 * Check the IP header for alignment and errors
2993 	 */
2994 	if (dir == PFIL_IN) {
2995 		switch (ether_type) {
2996 			case ETHERTYPE_IP:
2997 				error = bridge_ip_checkbasic(mp);
2998 				break;
2999 #ifdef INET6
3000 			case ETHERTYPE_IPV6:
3001 				error = bridge_ip6_checkbasic(mp);
3002 				break;
3003 #endif /* INET6 */
3004 			default:
3005 				error = 0;
3006 		}
3007 		if (error)
3008 			goto bad;
3009 	}
3010 
3011 	if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
3012 		error = -1;
3013 		args.rule = ip_dn_claim_rule(*mp);
3014 		if (args.rule != NULL && fw_one_pass)
3015 			goto ipfwpass; /* packet already partially processed */
3016 
3017 		args.m = *mp;
3018 		args.oif = ifp;
3019 		args.next_hop = NULL;
3020 		args.eh = &eh2;
3021 		args.inp = NULL;	/* used by ipfw uid/gid/jail rules */
3022 		i = ip_fw_chk_ptr(&args);
3023 		*mp = args.m;
3024 
3025 		if (*mp == NULL)
3026 			return (error);
3027 
3028 		if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
3029 
3030 			/* put the Ethernet header back on */
3031 			M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
3032 			if (*mp == NULL)
3033 				return (error);
3034 			bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3035 
3036 			/*
3037 			 * Pass the pkt to dummynet, which consumes it. The
3038 			 * packet will return to us via bridge_dummynet().
3039 			 */
3040 			args.oif = ifp;
3041 			ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args);
3042 			return (error);
3043 		}
3044 
3045 		if (i != IP_FW_PASS) /* drop */
3046 			goto bad;
3047 	}
3048 
3049 ipfwpass:
3050 	error = 0;
3051 
3052 	/*
3053 	 * Run the packet through pfil
3054 	 */
3055 	switch (ether_type) {
3056 	case ETHERTYPE_IP:
3057 		/*
3058 		 * Before calling the firewall, swap fields the same as
3059 		 * IP does.  Here we assume the header is contiguous.
3060 		 */
3061 		ip = mtod(*mp, struct ip *);
3062 
3063 		ip->ip_len = ntohs(ip->ip_len);
3064 		ip->ip_off = ntohs(ip->ip_off);
3065 
3066 		/*
3067 		 * Run pfil on the member interface and the bridge; either can
3068 		 * be skipped by clearing pfil_member or pfil_bridge.
3069 		 *
3070 		 * Keep the order:
3071 		 *   in_if -> bridge_if -> out_if
3072 		 */
3073 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3074 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
3075 					dir, NULL);
3076 
3077 		if (*mp == NULL || error != 0) /* filter may consume */
3078 			break;
3079 
3080 		if (pfil_member && ifp != NULL)
3081 			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
3082 					dir, NULL);
3083 
3084 		if (*mp == NULL || error != 0) /* filter may consume */
3085 			break;
3086 
3087 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3088 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
3089 					dir, NULL);
3090 
3091 		if (*mp == NULL || error != 0) /* filter may consume */
3092 			break;
3093 
3094 		/* check if we need to fragment the packet */
3095 		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3096 			i = (*mp)->m_pkthdr.len;
3097 			if (i > ifp->if_mtu) {
3098 				error = bridge_fragment(ifp, *mp, &eh2, snap,
3099 					    &llc1);
3100 				return (error);
3101 			}
3102 		}
3103 
3104 		/* Recalculate the ip checksum and restore byte ordering */
3105 		ip = mtod(*mp, struct ip *);
3106 		hlen = ip->ip_hl << 2;
3107 		if (hlen < sizeof(struct ip))
3108 			goto bad;
3109 		if (hlen > (*mp)->m_len) {
3110 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3111 				goto bad;
3112 			ip = mtod(*mp, struct ip *);
3113 			if (ip == NULL)
3114 				goto bad;
3115 		}
3116 		ip->ip_len = htons(ip->ip_len);
3117 		ip->ip_off = htons(ip->ip_off);
3118 		ip->ip_sum = 0;
3119 		if (hlen == sizeof(struct ip))
3120 			ip->ip_sum = in_cksum_hdr(ip);
3121 		else
3122 			ip->ip_sum = in_cksum(*mp, hlen);
3123 
3124 		break;
3125 #ifdef INET6
3126 	case ETHERTYPE_IPV6:
3127 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3128 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3129 					dir, NULL);
3130 
3131 		if (*mp == NULL || error != 0) /* filter may consume */
3132 			break;
3133 
3134 		if (pfil_member && ifp != NULL)
3135 			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3136 					dir, NULL);
3137 
3138 		if (*mp == NULL || error != 0) /* filter may consume */
3139 			break;
3140 
3141 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3142 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3143 					dir, NULL);
3144 		break;
3145 #endif
3146 	default:
3147 		error = 0;
3148 		break;
3149 	}
3150 
3151 	if (*mp == NULL)
3152 		return (error);
3153 	if (error != 0)
3154 		goto bad;
3155 
3156 	error = -1;
3157 
3158 	/*
3159 	 * Finally, put everything back the way it was and return
3160 	 */
3161 	if (snap) {
3162 		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
3163 		if (*mp == NULL)
3164 			return (error);
3165 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3166 	}
3167 
3168 	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
3169 	if (*mp == NULL)
3170 		return (error);
3171 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3172 
3173 	return (0);
3174 
3175 bad:
3176 	m_freem(*mp);
3177 	*mp = NULL;
3178 	return (error);
3179 }
3180 
3181 /*
3182  * Perform basic checks on the header size, since
3183  * pfil assumes that ip_input has already done this
3184  * work for it.  Cut-and-pasted from ip_input.c.
3185  * Given how simple the IPv6 version is,
3186  * does the IPv4 version really need to be
3187  * this complicated?
3188  *
3189  * XXX Should we update ipstat here, or not?
3190  * XXX Right now we update ipstat but not
3191  * XXX csum_counter.
3192  */
3193 static int
3194 bridge_ip_checkbasic(struct mbuf **mp)
3195 {
3196 	struct mbuf *m = *mp;
3197 	struct ip *ip;
3198 	int len, hlen;
3199 	u_short sum;
3200 
3201 	if (*mp == NULL)
3202 		return (-1);
3203 
3204 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3205 		if ((m = m_copyup(m, sizeof(struct ip),
3206 			(max_linkhdr + 3) & ~3)) == NULL) {
3207 			/* XXXJRT new stat, please */
3208 			ipstat.ips_toosmall++;
3209 			goto bad;
3210 		}
3211 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3212 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3213 			ipstat.ips_toosmall++;
3214 			goto bad;
3215 		}
3216 	}
3217 	ip = mtod(m, struct ip *);
3218 	if (ip == NULL) goto bad;
3219 
3220 	if (ip->ip_v != IPVERSION) {
3221 		ipstat.ips_badvers++;
3222 		goto bad;
3223 	}
3224 	hlen = ip->ip_hl << 2;
3225 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3226 		ipstat.ips_badhlen++;
3227 		goto bad;
3228 	}
3229 	if (hlen > m->m_len) {
3230 		if ((m = m_pullup(m, hlen)) == NULL) {
3231 			ipstat.ips_badhlen++;
3232 			goto bad;
3233 		}
3234 		ip = mtod(m, struct ip *);
3235 		if (ip == NULL) goto bad;
3236 	}
3237 
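	/*
	 * Trust a checksum that the hardware has already verified;
	 * otherwise check the header checksum in software.
	 */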
3238 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3239 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3240 	} else {
3241 		if (hlen == sizeof(struct ip)) {
3242 			sum = in_cksum_hdr(ip);
3243 		} else {
3244 			sum = in_cksum(m, hlen);
3245 		}
3246 	}
3247 	if (sum) {
3248 		ipstat.ips_badsum++;
3249 		goto bad;
3250 	}
3251 
3252 	/* Retrieve the packet length. */
3253 	len = ntohs(ip->ip_len);
3254 
3255 	/*
3256 	 * Check for additional length bogosity
3257 	 */
3258 	if (len < hlen) {
3259 		ipstat.ips_badlen++;
3260 		goto bad;
3261 	}
3262 
3263 	/*
3264 	 * Check that the amount of data in the buffers
3265 	 * is at least as much as the IP header would have us expect.
3266 	 * Drop packet if shorter than we expect.
3267 	 */
3268 	if (m->m_pkthdr.len < len) {
3269 		ipstat.ips_tooshort++;
3270 		goto bad;
3271 	}
3272 
3273 	/* Checks out, proceed */
3274 	*mp = m;
3275 	return (0);
3276 
3277 bad:
3278 	*mp = m;
3279 	return (-1);
3280 }
3281 
3282 #ifdef INET6
3283 /*
3284  * Same as above, but for IPv6.
3285  * Cut-and-pasted from ip6_input.c.
3286  * XXX Should we update ip6stat, or not?
3287  */
3288 static int
3289 bridge_ip6_checkbasic(struct mbuf **mp)
3290 {
3291 	struct mbuf *m = *mp;
3292 	struct ip6_hdr *ip6;
3293 
3294 	/*
3295 	 * If the IPv6 header is not aligned, slurp it up into a new
3296 	 * mbuf with space for link headers, in the event we forward
3297 	 * it.  Otherwise, if it is aligned, make sure the entire base
3298 	 * IPv6 header is in the first mbuf of the chain.
3299 	 */
3300 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3301 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3302 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3303 			    (max_linkhdr + 3) & ~3)) == NULL) {
3304 			/* XXXJRT new stat, please */
3305 			ip6stat.ip6s_toosmall++;
3306 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3307 			goto bad;
3308 		}
3309 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3310 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3311 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3312 			ip6stat.ip6s_toosmall++;
3313 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3314 			goto bad;
3315 		}
3316 	}
3317 
3318 	ip6 = mtod(m, struct ip6_hdr *);
3319 
3320 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3321 		ip6stat.ip6s_badvers++;
3322 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3323 		goto bad;
3324 	}
3325 
3326 	/* Checks out, proceed */
3327 	*mp = m;
3328 	return (0);
3329 
3330 bad:
3331 	*mp = m;
3332 	return (-1);
3333 }
3334 #endif /* INET6 */
3335 
3336 /*
3337  * bridge_fragment:
3338  *
3339  *	Fragment an IP packet and re-add the link headers to each fragment.
3340  */
3341 static int
3342 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3343     int snap, struct llc *llc)
3344 {
3345 	struct mbuf *m0, *nextpkt;
3346 	struct ip *ip;
3347 	int error = -1;
3348 
3349 	if (m->m_len < sizeof(struct ip) &&
3350 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3351 		goto out;
3352 	ip = mtod(m, struct ip *);
3353 
3354 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3355 		    CSUM_DELAY_IP);
3356 	if (error)
3357 		goto out;
3358 
3359 	/* Walk the chain and re-add the Ethernet header to each fragment. */
3360 	for (m0 = m; m0; m0 = nextpkt) {
		/* Save the link now; a failed M_PREPEND below frees m0. */
		nextpkt = m0->m_nextpkt;
3361 		if (error == 0) {
3362 			if (snap) {
3363 				M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT);
3364 				if (m0 == NULL) {
3365 					error = ENOBUFS;
3366 					continue;
3367 				}
3368 				bcopy(llc, mtod(m0, caddr_t),
3369 				    sizeof(struct llc));
3370 			}
3371 			M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT);
3372 			if (m0 == NULL) {
3373 				error = ENOBUFS;
3374 				continue;
3375 			}
3376 			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3377 		} else
3378 			m_freem(m0);
3379 	}
3380 
3381 	if (error == 0)
3382 		ipstat.ips_fragmented++;
3383 
3384 	return (error);
3385 
3386 out:
3387 	if (m != NULL)
3388 		m_freem(m);
3389 	return (error);
3390 }
3391