xref: /freebsd/sys/net/if_bridge.c (revision 5c52a79884070364bfc920fb8e492cfac61ec72f)
1 /*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
53  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
54  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
55  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
56  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
57  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
59  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
60  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
61  * POSSIBILITY OF SUCH DAMAGE.
62  *
63  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
64  */
65 
66 /*
67  * Network interface bridge support.
68  *
69  * TODO:
70  *
71  *	- Currently only supports Ethernet-like interfaces (Ethernet,
72  *	  802.11, VLANs on Ethernet, etc.).  Figure out a nice way
73  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
74  *	  consider heterogeneous bridges).
75  */
76 
77 #include <sys/cdefs.h>
78 __FBSDID("$FreeBSD$");
79 
80 #include "opt_inet.h"
81 #include "opt_inet6.h"
82 #include "opt_carp.h"
83 
84 #include <sys/param.h>
85 #include <sys/mbuf.h>
86 #include <sys/malloc.h>
87 #include <sys/protosw.h>
88 #include <sys/systm.h>
89 #include <sys/time.h>
90 #include <sys/socket.h> /* for net/if.h */
91 #include <sys/sockio.h>
92 #include <sys/ctype.h>  /* string functions */
93 #include <sys/kernel.h>
94 #include <sys/random.h>
95 #include <sys/syslog.h>
96 #include <sys/sysctl.h>
97 #include <vm/uma.h>
98 #include <sys/module.h>
99 #include <sys/priv.h>
100 #include <sys/proc.h>
101 #include <sys/lock.h>
102 #include <sys/mutex.h>
103 #include <sys/vimage.h>
104 
105 #include <net/bpf.h>
106 #include <net/if.h>
107 #include <net/if_clone.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_var.h>
111 #include <net/pfil.h>
112 
113 #include <netinet/in.h> /* for struct arpcom */
114 #include <netinet/in_systm.h>
115 #include <netinet/in_var.h>
116 #include <netinet/ip.h>
117 #include <netinet/ip_var.h>
118 #include <netinet/vinet.h>
119 #ifdef INET6
120 #include <netinet/ip6.h>
121 #include <netinet6/ip6_var.h>
122 #include <netinet6/vinet6.h>
123 #endif
124 #ifdef DEV_CARP
125 #include <netinet/ip_carp.h>
126 #endif
127 #include <machine/in_cksum.h>
128 #include <netinet/if_ether.h> /* for struct arpcom */
129 #include <net/bridgestp.h>
130 #include <net/if_bridgevar.h>
131 #include <net/if_llc.h>
132 #include <net/if_vlan_var.h>
133 
134 #include <net/route.h>
135 #include <netinet/ip_fw.h>
136 #include <netinet/ip_dummynet.h>
137 
138 /*
139  * Size of the route hash table.  Must be a power of two.
140  */
141 #ifndef BRIDGE_RTHASH_SIZE
142 #define	BRIDGE_RTHASH_SIZE		1024
143 #endif
144 
145 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
146 
147 /*
148  * Maximum number of addresses to cache.
149  */
150 #ifndef BRIDGE_RTABLE_MAX
151 #define	BRIDGE_RTABLE_MAX		100
152 #endif
153 
154 /*
155  * Timeout (in seconds) for entries learned dynamically.
156  */
157 #ifndef BRIDGE_RTABLE_TIMEOUT
158 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
159 #endif
160 
161 /*
162  * Number of seconds between walks of the route list.
163  */
164 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
165 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
166 #endif
167 
168 /*
169  * List of capabilities to possibly mask on the member interface.
170  */
171 #define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
172 
173 /*
174  * Bridge interface list entry.
175  */
176 struct bridge_iflist {
177 	LIST_ENTRY(bridge_iflist) bif_next;
178 	struct ifnet		*bif_ifp;	/* member if */
179 	struct bstp_port	bif_stp;	/* STP state */
180 	uint32_t		bif_flags;	/* member if flags */
181 	int			bif_savedcaps;	/* saved capabilities */
182 	uint32_t		bif_addrmax;	/* max # of addresses */
183 	uint32_t		bif_addrcnt;	/* cur. # of addresses */
184 	uint32_t		bif_addrexceeded;/* # of address violations */
185 };
186 
187 /*
188  * Bridge route node.
189  */
190 struct bridge_rtnode {
191 	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
192 	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
193 	struct bridge_iflist	*brt_dst;	/* destination if */
194 	unsigned long		brt_expire;	/* expiration time */
195 	uint8_t			brt_flags;	/* address flags */
196 	uint8_t			brt_addr[ETHER_ADDR_LEN];
197 	uint16_t		brt_vlan;	/* vlan id */
198 };
199 #define	brt_ifp			brt_dst->bif_ifp
200 
201 /*
202  * Software state for each bridge.
203  */
204 struct bridge_softc {
205 	struct ifnet		*sc_ifp;	/* make this an interface */
206 	LIST_ENTRY(bridge_softc) sc_list;
207 	struct mtx		sc_mtx;
208 	struct cv		sc_cv;
209 	uint32_t		sc_brtmax;	/* max # of addresses */
210 	uint32_t		sc_brtcnt;	/* cur. # of addresses */
211 	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
212 	struct callout		sc_brcallout;	/* bridge callout */
213 	uint32_t		sc_iflist_ref;	/* refcount for sc_iflist */
214 	uint32_t		sc_iflist_xcnt;	/* exclusive access count for sc_iflist */
215 	LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
216 	LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
217 	LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
218 	uint32_t		sc_rthash_key;	/* key for hash */
219 	LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
220 	struct bstp_state	sc_stp;		/* STP state */
221 	uint32_t		sc_brtexceeded;	/* # of cache drops */
222 	u_char			sc_defaddr[6];	/* Default MAC address */
223 };
224 
225 static struct mtx 	bridge_list_mtx;
226 eventhandler_tag	bridge_detach_cookie = NULL;
227 
228 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
229 
230 uma_zone_t bridge_rtnode_zone;
231 
232 static int	bridge_clone_create(struct if_clone *, int, caddr_t);
233 static void	bridge_clone_destroy(struct ifnet *);
234 
235 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
236 static void	bridge_mutecaps(struct bridge_softc *);
237 static void	bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
238 		    int);
239 static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
240 static void	bridge_init(void *);
241 static void	bridge_dummynet(struct mbuf *, struct ifnet *);
242 static void	bridge_stop(struct ifnet *, int);
243 static void	bridge_start(struct ifnet *);
244 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
245 static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
246 		    struct rtentry *);
247 static void	bridge_enqueue(struct bridge_softc *, struct ifnet *,
248 		    struct mbuf *);
249 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
250 
251 static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
252 		    struct mbuf *m);
253 
254 static void	bridge_timer(void *);
255 
256 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
257 		    struct mbuf *, int);
258 static void	bridge_span(struct bridge_softc *, struct mbuf *);
259 
260 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
261 		    uint16_t, struct bridge_iflist *, int, uint8_t);
262 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
263 		    uint16_t);
264 static void	bridge_rttrim(struct bridge_softc *);
265 static void	bridge_rtage(struct bridge_softc *);
266 static void	bridge_rtflush(struct bridge_softc *, int);
267 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
268 		    uint16_t);
269 
270 static int	bridge_rtable_init(struct bridge_softc *);
271 static void	bridge_rtable_fini(struct bridge_softc *);
272 
273 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
274 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
275 		    const uint8_t *, uint16_t);
276 static int	bridge_rtnode_insert(struct bridge_softc *,
277 		    struct bridge_rtnode *);
278 static void	bridge_rtnode_destroy(struct bridge_softc *,
279 		    struct bridge_rtnode *);
280 static void	bridge_rtable_expire(struct ifnet *, int);
281 static void	bridge_state_change(struct ifnet *, int);
282 
283 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
284 		    const char *name);
285 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
286 		    struct ifnet *ifp);
287 static void	bridge_delete_member(struct bridge_softc *,
288 		    struct bridge_iflist *, int);
289 static void	bridge_delete_span(struct bridge_softc *,
290 		    struct bridge_iflist *);
291 
292 static int	bridge_ioctl_add(struct bridge_softc *, void *);
293 static int	bridge_ioctl_del(struct bridge_softc *, void *);
294 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
295 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
296 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
297 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
298 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
299 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
300 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
301 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
302 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
303 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
304 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
305 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
306 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
307 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
308 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
309 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
310 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
311 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
312 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
313 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
314 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
315 static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
316 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
317 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
318 static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
319 static int	bridge_ioctl_grte(struct bridge_softc *, void *);
320 static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
321 static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
322 static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
323 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
324 		    int);
325 static int	bridge_ip_checkbasic(struct mbuf **mp);
326 #ifdef INET6
327 static int	bridge_ip6_checkbasic(struct mbuf **mp);
328 #endif /* INET6 */
329 static int	bridge_fragment(struct ifnet *, struct mbuf *,
330 		    struct ether_header *, int, struct llc *);
331 
332 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
333 #define	VLANTAGOF(_m)	\
334     (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1
335 
336 static struct bstp_cb_ops bridge_ops = {
337 	.bcb_state = bridge_state_change,
338 	.bcb_rtage = bridge_rtable_expire
339 };
340 
341 SYSCTL_DECL(_net_link);
342 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
343 
344 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
345 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
346 static int pfil_member = 1; /* run pfil hooks on the member interface */
347 static int pfil_ipfw = 0;   /* layer2 filter with ipfw */
348 static int pfil_ipfw_arp = 0;   /* layer2 ARP filtering with ipfw */
349 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for
350                                    locally destined packets */
351 static int log_stp   = 0;   /* log STP state changes */
352 static int bridge_inherit_mac = 0;   /* share MAC with first bridge member */
353 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
354     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
355 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW,
356     &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
357 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
358     &pfil_bridge, 0, "Packet filter on the bridge interface");
359 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
360     &pfil_member, 0, "Packet filter on the member interface");
361 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW,
362     &pfil_local_phys, 0,
363     "Packet filter on the physical interface for locally destined packets");
364 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
365     &log_stp, 0, "Log STP state changes");
366 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, CTLFLAG_RW,
367     &bridge_inherit_mac, 0,
368     "Inherit MAC address from the first bridge member");
369 
370 struct bridge_control {
371 	int	(*bc_func)(struct bridge_softc *, void *);
372 	int	bc_argsize;
373 	int	bc_flags;
374 };
375 
376 #define	BC_F_COPYIN		0x01	/* copy arguments in */
377 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
378 #define	BC_F_SUSER		0x04	/* do super-user check */
379 
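/*
 * bridge_control_table is indexed directly by ifd_cmd taken from
 * SIOC[GS]DRVSPEC requests (see bridge_ioctl() below), so the entries
 * must stay in the same order as the BRDG* command constants in
 * <net/if_bridgevar.h>.
 */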
380 const struct bridge_control bridge_control_table[] = {
381 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
382 	  BC_F_COPYIN|BC_F_SUSER },
383 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
384 	  BC_F_COPYIN|BC_F_SUSER },
385 
386 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
387 	  BC_F_COPYIN|BC_F_COPYOUT },
388 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
389 	  BC_F_COPYIN|BC_F_SUSER },
390 
391 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
392 	  BC_F_COPYIN|BC_F_SUSER },
393 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
394 	  BC_F_COPYOUT },
395 
396 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
397 	  BC_F_COPYIN|BC_F_COPYOUT },
398 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
399 	  BC_F_COPYIN|BC_F_COPYOUT },
400 
401 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
402 	  BC_F_COPYIN|BC_F_SUSER },
403 
404 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
405 	  BC_F_COPYIN|BC_F_SUSER },
406 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
407 	  BC_F_COPYOUT },
408 
409 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
410 	  BC_F_COPYIN|BC_F_SUSER },
411 
412 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
413 	  BC_F_COPYIN|BC_F_SUSER },
414 
415 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
416 	  BC_F_COPYOUT },
417 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
418 	  BC_F_COPYIN|BC_F_SUSER },
419 
420 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
421 	  BC_F_COPYOUT },
422 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
423 	  BC_F_COPYIN|BC_F_SUSER },
424 
425 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
426 	  BC_F_COPYOUT },
427 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
428 	  BC_F_COPYIN|BC_F_SUSER },
429 
430 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
431 	  BC_F_COPYOUT },
432 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
433 	  BC_F_COPYIN|BC_F_SUSER },
434 
435 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
436 	  BC_F_COPYIN|BC_F_SUSER },
437 
438 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
439 	  BC_F_COPYIN|BC_F_SUSER },
440 
441 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
442 	  BC_F_COPYIN|BC_F_SUSER },
443 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
444 	  BC_F_COPYIN|BC_F_SUSER },
445 
446 	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
447 	  BC_F_COPYOUT },
448 
449 	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
450 	  BC_F_COPYOUT },
451 
452 	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
453 	  BC_F_COPYIN|BC_F_COPYOUT },
454 
455 	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
456 	  BC_F_COPYIN|BC_F_SUSER },
457 
458 	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
459 	  BC_F_COPYIN|BC_F_SUSER },
460 
461 	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
462 	  BC_F_COPYIN|BC_F_SUSER },
463 
464 };
465 const int bridge_control_table_size =
466     sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
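
/*
 * Userland sketch (not part of this file): a bridge command is issued
 * roughly the way ifconfig(8) does it, by wrapping the per-command
 * argument structure in a struct ifdrv and passing it down with
 * SIOC[GS]DRVSPEC.  The member name "em0" and the BRDGADD index (from
 * <net/if_bridgevar.h>; <net/if.h> supplies struct ifdrv) are
 * illustrative only.
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *	int s = socket(AF_LOCAL, SOCK_DGRAM, 0);
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);	/- dispatches bridge_ioctl_add() -/
 */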
467 
468 LIST_HEAD(, bridge_softc) bridge_list;
469 
470 IFC_SIMPLE_DECLARE(bridge, 0);
471 
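/*
 * bridge_modevent:
 *
 *	Handle module load and unload: on load, hook the bridge into the
 *	Ethernet input/output paths and register for interface departure
 *	events; on unload, undo all of that.
 */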
472 static int
473 bridge_modevent(module_t mod, int type, void *data)
474 {
475 
476 	switch (type) {
477 	case MOD_LOAD:
478 		mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF);
479 		if_clone_attach(&bridge_cloner);
480 		bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
481 		    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
482 		    UMA_ALIGN_PTR, 0);
483 		LIST_INIT(&bridge_list);
484 		bridge_input_p = bridge_input;
485 		bridge_output_p = bridge_output;
486 		bridge_dn_p = bridge_dummynet;
487 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
488 		    ifnet_departure_event, bridge_ifdetach, NULL,
489 		    EVENTHANDLER_PRI_ANY);
490 		break;
491 	case MOD_UNLOAD:
492 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
493 		    bridge_detach_cookie);
494 		if_clone_detach(&bridge_cloner);
495 		uma_zdestroy(bridge_rtnode_zone);
496 		bridge_input_p = NULL;
497 		bridge_output_p = NULL;
498 		bridge_dn_p = NULL;
499 		mtx_destroy(&bridge_list_mtx);
500 		break;
501 	default:
502 		return (EOPNOTSUPP);
503 	}
504 	return (0);
505 }
506 
507 static moduledata_t bridge_mod = {
508 	"if_bridge",
509 	bridge_modevent,
510 	0
511 };
512 
513 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
514 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
515 
516 /*
517  * handler for net.link.bridge.pfil_ipfw
518  */
519 static int
520 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
521 {
522 	int enable = pfil_ipfw;
523 	int error;
524 
525 	error = sysctl_handle_int(oidp, &enable, 0, req);
526 	enable = (enable) ? 1 : 0;
527 
528 	if (enable != pfil_ipfw) {
529 		pfil_ipfw = enable;
530 
531 		/*
532 		 * Disable pfil so that ipfw doesn't run twice; if the user
533 		 * really wants both, they can re-enable pfil_bridge and/or
534 		 * pfil_member.  Also allow non-IP packets, as ipfw can filter
535 		 * by layer2 type.
536 		 */
537 		if (pfil_ipfw) {
538 			pfil_onlyip = 0;
539 			pfil_bridge = 0;
540 			pfil_member = 0;
541 		}
542 	}
543 
544 	return (error);
545 }
546 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
547 	    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
548 
549 /*
550  * bridge_clone_create:
551  *
552  *	Create a new bridge instance.
553  */
554 static int
555 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
556 {
557 	struct bridge_softc *sc, *sc2;
558 	struct ifnet *bifp, *ifp;
559 	int retry;
560 
561 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
562 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
563 	if (ifp == NULL) {
564 		free(sc, M_DEVBUF);
565 		return (ENOSPC);
566 	}
567 
568 	BRIDGE_LOCK_INIT(sc);
569 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
570 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
571 
572 	/* Initialize our routing table. */
573 	bridge_rtable_init(sc);
574 
575 	callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);
576 
577 	LIST_INIT(&sc->sc_iflist);
578 	LIST_INIT(&sc->sc_spanlist);
579 
580 	ifp->if_softc = sc;
581 	if_initname(ifp, ifc->ifc_name, unit);
582 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
583 	ifp->if_ioctl = bridge_ioctl;
584 	ifp->if_start = bridge_start;
585 	ifp->if_init = bridge_init;
586 	ifp->if_type = IFT_BRIDGE;
587 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
588 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
589 	IFQ_SET_READY(&ifp->if_snd);
590 
591 	/*
592 	 * Generate a random Ethernet address with the locally administered
593 	 * bit set.
594 	 *
595 	 * Since we are using random Ethernet addresses for the bridge, it is
596 	 * possible that we might have address collisions, so make sure that
597 	 * this hardware address isn't already in use on another bridge.
598 	 */
599 	for (retry = 1; retry != 0;) {
600 		arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1);
601 		sc->sc_defaddr[0] &= ~1;	/* clear multicast bit */
602 		sc->sc_defaddr[0] |= 2;		/* set the LAA bit */
603 		retry = 0;
604 		mtx_lock(&bridge_list_mtx);
605 		LIST_FOREACH(sc2, &bridge_list, sc_list) {
606 			bifp = sc2->sc_ifp;
607 			if (memcmp(sc->sc_defaddr,
608 			    IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0)
609 				retry = 1;
610 		}
611 		mtx_unlock(&bridge_list_mtx);
612 	}
613 
614 	bstp_attach(&sc->sc_stp, &bridge_ops);
615 	ether_ifattach(ifp, sc->sc_defaddr);
616 	/* Now undo some of the damage... */
617 	ifp->if_baudrate = 0;
618 	ifp->if_type = IFT_BRIDGE;
619 
620 	mtx_lock(&bridge_list_mtx);
621 	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
622 	mtx_unlock(&bridge_list_mtx);
623 
624 	return (0);
625 }
626 
627 /*
628  * bridge_clone_destroy:
629  *
630  *	Destroy a bridge instance.
631  */
632 static void
633 bridge_clone_destroy(struct ifnet *ifp)
634 {
635 	struct bridge_softc *sc = ifp->if_softc;
636 	struct bridge_iflist *bif;
637 
638 	BRIDGE_LOCK(sc);
639 
640 	bridge_stop(ifp, 1);
641 	ifp->if_flags &= ~IFF_UP;
642 
643 	while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
644 		bridge_delete_member(sc, bif, 0);
645 
646 	while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) {
647 		bridge_delete_span(sc, bif);
648 	}
649 
650 	BRIDGE_UNLOCK(sc);
651 
652 	callout_drain(&sc->sc_brcallout);
653 
654 	mtx_lock(&bridge_list_mtx);
655 	LIST_REMOVE(sc, sc_list);
656 	mtx_unlock(&bridge_list_mtx);
657 
658 	bstp_detach(&sc->sc_stp);
659 	ether_ifdetach(ifp);
660 	if_free_type(ifp, IFT_ETHER);
661 
662 	/* Tear down the routing table. */
663 	bridge_rtable_fini(sc);
664 
665 	BRIDGE_LOCK_DESTROY(sc);
666 	free(sc, M_DEVBUF);
667 }
668 
669 /*
670  * bridge_ioctl:
671  *
672  *	Handle a control request from the operator.
673  */
674 static int
675 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
676 {
677 	struct bridge_softc *sc = ifp->if_softc;
678 	struct thread *td = curthread;
679 	union {
680 		struct ifbreq ifbreq;
681 		struct ifbifconf ifbifconf;
682 		struct ifbareq ifbareq;
683 		struct ifbaconf ifbaconf;
684 		struct ifbrparam ifbrparam;
685 		struct ifbropreq ifbropreq;
686 	} args;
687 	struct ifdrv *ifd = (struct ifdrv *) data;
688 	const struct bridge_control *bc;
689 	int error = 0;
690 
691 	switch (cmd) {
692 
693 	case SIOCADDMULTI:
694 	case SIOCDELMULTI:
695 		break;
696 
697 	case SIOCGDRVSPEC:
698 	case SIOCSDRVSPEC:
699 		if (ifd->ifd_cmd >= bridge_control_table_size) {
700 			error = EINVAL;
701 			break;
702 		}
703 		bc = &bridge_control_table[ifd->ifd_cmd];
704 
705 		if (cmd == SIOCGDRVSPEC &&
706 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
707 			error = EINVAL;
708 			break;
709 		}
710 		else if (cmd == SIOCSDRVSPEC &&
711 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
712 			error = EINVAL;
713 			break;
714 		}
715 
716 		if (bc->bc_flags & BC_F_SUSER) {
717 			error = priv_check(td, PRIV_NET_BRIDGE);
718 			if (error)
719 				break;
720 		}
721 
722 		if (ifd->ifd_len != bc->bc_argsize ||
723 		    ifd->ifd_len > sizeof(args)) {
724 			error = EINVAL;
725 			break;
726 		}
727 
728 		bzero(&args, sizeof(args));
729 		if (bc->bc_flags & BC_F_COPYIN) {
730 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
731 			if (error)
732 				break;
733 		}
734 
735 		BRIDGE_LOCK(sc);
736 		error = (*bc->bc_func)(sc, &args);
737 		BRIDGE_UNLOCK(sc);
738 		if (error)
739 			break;
740 
741 		if (bc->bc_flags & BC_F_COPYOUT)
742 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
743 
744 		break;
745 
746 	case SIOCSIFFLAGS:
747 		if (!(ifp->if_flags & IFF_UP) &&
748 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
749 			/*
750 			 * If interface is marked down and it is running,
751 			 * then stop and disable it.
752 			 */
753 			BRIDGE_LOCK(sc);
754 			bridge_stop(ifp, 1);
755 			BRIDGE_UNLOCK(sc);
756 		} else if ((ifp->if_flags & IFF_UP) &&
757 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
758 			/*
759 			 * If interface is marked up and it is stopped, then
760 			 * start it.
761 			 */
762 			(*ifp->if_init)(sc);
763 		}
764 		break;
765 
766 	case SIOCSIFMTU:
767 		/* Do not allow the MTU to be changed on the bridge */
768 		error = EINVAL;
769 		break;
770 
771 	default:
772 		/*
773 		 * drop the lock as ether_ioctl() will call bridge_start() and
774 		 * cause the lock to be recursed.
775 		 */
776 		error = ether_ioctl(ifp, cmd, data);
777 		break;
778 	}
779 
780 	return (error);
781 }
782 
783 /*
784  * bridge_mutecaps:
785  *
786  *	Clear or restore unwanted capabilities on the member interface
787  */
788 static void
789 bridge_mutecaps(struct bridge_softc *sc)
790 {
791 	struct bridge_iflist *bif;
792 	int enabled, mask;
793 
794 	/* Initial bitmask of capabilities to test */
795 	mask = BRIDGE_IFCAPS_MASK;
796 
797 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
798 		/* Every member must support it or it's disabled */
799 		mask &= bif->bif_savedcaps;
800 	}
801 
802 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
803 		enabled = bif->bif_ifp->if_capenable;
804 		/* strip off mask bits and enable them again if allowed */
805 		enabled &= ~BRIDGE_IFCAPS_MASK;
806 		enabled |= mask;
807 		/*
808 		 * Receive offload can only be enabled if all members also
809 		 * support send offload.
810 		 */
811 		if ((enabled & IFCAP_TSO) == 0)
812 			enabled &= ~IFCAP_LRO;
813 
814 		bridge_set_ifcap(sc, bif, enabled);
815 	}
816 
817 }
818 
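/*
 * bridge_set_ifcap:
 *
 *	Request a new capability set on a member interface via its
 *	SIOCSIFCAP handler, printing a warning if the ioctl fails.
 */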
819 static void
820 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
821 {
822 	struct ifnet *ifp = bif->bif_ifp;
823 	struct ifreq ifr;
824 	int error;
825 
826 	bzero(&ifr, sizeof(ifr));
827 	ifr.ifr_reqcap = set;
828 
829 	if (ifp->if_capenable != set) {
830 		IFF_LOCKGIANT(ifp);
831 		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
832 		IFF_UNLOCKGIANT(ifp);
833 		if (error)
834 			if_printf(sc->sc_ifp,
835 			    "error setting interface capabilities on %s\n",
836 			    ifp->if_xname);
837 	}
838 }
839 
840 /*
841  * bridge_lookup_member:
842  *
843  *	Lookup a bridge member interface.
844  */
845 static struct bridge_iflist *
846 bridge_lookup_member(struct bridge_softc *sc, const char *name)
847 {
848 	struct bridge_iflist *bif;
849 	struct ifnet *ifp;
850 
851 	BRIDGE_LOCK_ASSERT(sc);
852 
853 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
854 		ifp = bif->bif_ifp;
855 		if (strcmp(ifp->if_xname, name) == 0)
856 			return (bif);
857 	}
858 
859 	return (NULL);
860 }
861 
862 /*
863  * bridge_lookup_member_if:
864  *
865  *	Lookup a bridge member interface by ifnet*.
866  */
867 static struct bridge_iflist *
868 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
869 {
870 	struct bridge_iflist *bif;
871 
872 	BRIDGE_LOCK_ASSERT(sc);
873 
874 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
875 		if (bif->bif_ifp == member_ifp)
876 			return (bif);
877 	}
878 
879 	return (NULL);
880 }
881 
882 /*
883  * bridge_delete_member:
884  *
885  *	Delete the specified member interface.
886  */
887 static void
888 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
889     int gone)
890 {
891 	struct ifnet *ifs = bif->bif_ifp;
892 	struct ifnet *fif = NULL;
893 
894 	BRIDGE_LOCK_ASSERT(sc);
895 
896 	if (!gone) {
897 		switch (ifs->if_type) {
898 		case IFT_ETHER:
899 		case IFT_L2VLAN:
900 			/*
901 			 * Take the interface out of promiscuous mode.
902 			 */
903 			(void) ifpromisc(ifs, 0);
904 			break;
905 
906 		case IFT_GIF:
907 			break;
908 
909 		default:
910 #ifdef DIAGNOSTIC
911 			panic("bridge_delete_member: impossible");
912 #endif
913 			break;
914 		}
915 		/* re-enable any interface capabilities */
916 		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
917 	}
918 
919 	if (bif->bif_flags & IFBIF_STP)
920 		bstp_disable(&bif->bif_stp);
921 
922 	ifs->if_bridge = NULL;
923 	BRIDGE_XLOCK(sc);
924 	LIST_REMOVE(bif, bif_next);
925 	BRIDGE_XDROP(sc);
926 
927 	/*
928 	 * If removing the interface that gave the bridge its MAC address, set
929 	 * the MAC address of the bridge to the address of the next member, or
930 	 * to its default address if no members are left.
931 	 */
932 	if (bridge_inherit_mac &&
933 	    !memcmp(IF_LLADDR(sc->sc_ifp), IF_LLADDR(ifs), ETHER_ADDR_LEN)) {
934 		if (LIST_EMPTY(&sc->sc_iflist))
935 			bcopy(sc->sc_defaddr,
936 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
937 		else {
938 			fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp;
939 			bcopy(IF_LLADDR(fif),
940 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
941 		}
942 	}
943 	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
944 	bridge_mutecaps(sc);	/* recalcuate now this interface is removed */
945 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
946 	KASSERT(bif->bif_addrcnt == 0,
947 	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
948 
949 	BRIDGE_UNLOCK(sc);
950 	bstp_destroy(&bif->bif_stp);	/* prepare to free */
951 	BRIDGE_LOCK(sc);
952 	free(bif, M_DEVBUF);
953 }
954 
955 /*
956  * bridge_delete_span:
957  *
958  *	Delete the specified span interface.
959  */
960 static void
961 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
962 {
963 	BRIDGE_LOCK_ASSERT(sc);
964 
965 	KASSERT(bif->bif_ifp->if_bridge == NULL,
966 	    ("%s: not a span interface", __func__));
967 
968 	LIST_REMOVE(bif, bif_next);
969 	free(bif, M_DEVBUF);
970 }
971 
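/*
 * bridge_ioctl_add:
 *
 *	Add the named interface as a bridge member.  It must be an
 *	Ethernet-like (or gif) interface, must not already be a member or
 *	span port of a bridge, and (unless it is the first member) must
 *	match the bridge MTU.
 */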
972 static int
973 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
974 {
975 	struct ifbreq *req = arg;
976 	struct bridge_iflist *bif = NULL;
977 	struct ifnet *ifs;
978 	int error = 0;
979 
980 	ifs = ifunit(req->ifbr_ifsname);
981 	if (ifs == NULL)
982 		return (ENOENT);
983 	if (ifs->if_ioctl == NULL)	/* must be supported */
984 		return (EINVAL);
985 
986 	/* If it's in the span list, it can't be a member. */
987 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
988 		if (ifs == bif->bif_ifp)
989 			return (EBUSY);
990 
991 	/* Allow the first Ethernet member to define the MTU */
992 	if (ifs->if_type != IFT_GIF) {
993 		if (LIST_EMPTY(&sc->sc_iflist))
994 			sc->sc_ifp->if_mtu = ifs->if_mtu;
995 		else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
996 			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
997 			    ifs->if_xname);
998 			return (EINVAL);
999 		}
1000 	}
1001 
1002 	if (ifs->if_bridge == sc)
1003 		return (EEXIST);
1004 
1005 	if (ifs->if_bridge != NULL)
1006 		return (EBUSY);
1007 
1008 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1009 	if (bif == NULL)
1010 		return (ENOMEM);
1011 
1012 	bif->bif_ifp = ifs;
1013 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1014 	bif->bif_savedcaps = ifs->if_capenable;
1015 
1016 	switch (ifs->if_type) {
1017 	case IFT_ETHER:
1018 	case IFT_L2VLAN:
1019 		/*
1020 		 * Place the interface into promiscuous mode.
1021 		 */
1022 		error = ifpromisc(ifs, 1);
1023 		if (error)
1024 			goto out;
1025 		break;
1026 
1027 	case IFT_GIF:
1028 		break;
1029 
1030 	default:
1031 		error = EINVAL;
1032 		goto out;
1033 	}
1034 
1035 	/*
1036 	 * Assign the interface's MAC address to the bridge if it's the first
1037 	 * member and the MAC address of the bridge has not been changed from
1038 	 * the default randomly generated one.
1039 	 */
1040 	if (bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) &&
1041 	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN))
1042 		bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1043 
1044 	ifs->if_bridge = sc;
1045 	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1046 	/*
1047 	 * XXX: XLOCK HERE!?!
1048 	 *
1049 	 * NOTE: insert_***HEAD*** should be safe for the traversals.
1050 	 */
1051 	LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1052 
1053 	/* Set interface capabilities to the intersection set of all members */
1054 	bridge_mutecaps(sc);
1055 out:
1056 	if (error) {
1057 		if (bif != NULL)
1058 			free(bif, M_DEVBUF);
1059 	}
1060 	return (error);
1061 }
1062 
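/*
 * bridge_ioctl_del:
 *
 *	Remove the named member interface from the bridge.
 */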
1063 static int
1064 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1065 {
1066 	struct ifbreq *req = arg;
1067 	struct bridge_iflist *bif;
1068 
1069 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1070 	if (bif == NULL)
1071 		return (ENOENT);
1072 
1073 	bridge_delete_member(sc, bif, 0);
1074 
1075 	return (0);
1076 }
1077 
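/*
 * bridge_ioctl_gifflags:
 *
 *	Report a member's bridge flags, STP state and address-cache
 *	counters back to userland.
 */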
1078 static int
1079 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1080 {
1081 	struct ifbreq *req = arg;
1082 	struct bridge_iflist *bif;
1083 	struct bstp_port *bp;
1084 
1085 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1086 	if (bif == NULL)
1087 		return (ENOENT);
1088 
1089 	bp = &bif->bif_stp;
1090 	req->ifbr_ifsflags = bif->bif_flags;
1091 	req->ifbr_state = bp->bp_state;
1092 	req->ifbr_priority = bp->bp_priority;
1093 	req->ifbr_path_cost = bp->bp_path_cost;
1094 	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1095 	req->ifbr_proto = bp->bp_protover;
1096 	req->ifbr_role = bp->bp_role;
1097 	req->ifbr_stpflags = bp->bp_flags;
1098 	req->ifbr_addrcnt = bif->bif_addrcnt;
1099 	req->ifbr_addrmax = bif->bif_addrmax;
1100 	req->ifbr_addrexceeded = bif->bif_addrexceeded;
1101 
1102 	/* Copy STP state options as flags */
1103 	if (bp->bp_operedge)
1104 		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1105 	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1106 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1107 	if (bp->bp_ptp_link)
1108 		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1109 	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1110 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1111 	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1112 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1113 	if (bp->bp_flags & BSTP_PORT_ADMCOST)
1114 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1115 	return (0);
1116 }
1117 
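/*
 * bridge_ioctl_sifflags:
 *
 *	Set a member's bridge flags, enabling or disabling STP on the
 *	port and updating the STP edge/point-to-point options as requested.
 */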
1118 static int
1119 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1120 {
1121 	struct ifbreq *req = arg;
1122 	struct bridge_iflist *bif;
1123 	struct bstp_port *bp;
1124 	int error;
1125 
1126 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1127 	if (bif == NULL)
1128 		return (ENOENT);
1129 	bp = &bif->bif_stp;
1130 
1131 	if (req->ifbr_ifsflags & IFBIF_SPAN)
1132 		/* SPAN is read-only */
1133 		return (EINVAL);
1134 
1135 	if (req->ifbr_ifsflags & IFBIF_STP) {
1136 		if ((bif->bif_flags & IFBIF_STP) == 0) {
1137 			error = bstp_enable(&bif->bif_stp);
1138 			if (error)
1139 				return (error);
1140 		}
1141 	} else {
1142 		if ((bif->bif_flags & IFBIF_STP) != 0)
1143 			bstp_disable(&bif->bif_stp);
1144 	}
1145 
1146 	/* Pass on STP flags */
1147 	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1148 	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1149 	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1150 	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1151 
1152 	/* Save the bits relating to the bridge */
1153 	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1154 
1155 	return (0);
1156 }
1157 
1158 static int
1159 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1160 {
1161 	struct ifbrparam *param = arg;
1162 
1163 	sc->sc_brtmax = param->ifbrp_csize;
1164 	bridge_rttrim(sc);
1165 
1166 	return (0);
1167 }
1168 
1169 static int
1170 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1171 {
1172 	struct ifbrparam *param = arg;
1173 
1174 	param->ifbrp_csize = sc->sc_brtmax;
1175 
1176 	return (0);
1177 }
1178 
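/*
 * bridge_ioctl_gifs:
 *
 *	Copy the list of member and span interfaces out to userland.
 */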
1179 static int
1180 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1181 {
1182 	struct ifbifconf *bifc = arg;
1183 	struct bridge_iflist *bif;
1184 	struct ifbreq breq;
1185 	char *buf, *outbuf;
1186 	int count, buflen, len, error = 0;
1187 
1188 	count = 0;
1189 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1190 		count++;
1191 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1192 		count++;
1193 
1194 	buflen = sizeof(breq) * count;
1195 	if (bifc->ifbic_len == 0) {
1196 		bifc->ifbic_len = buflen;
1197 		return (0);
1198 	}
1199 	BRIDGE_UNLOCK(sc);
1200 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1201 	BRIDGE_LOCK(sc);
1202 
1203 	count = 0;
1204 	buf = outbuf;
1205 	len = min(bifc->ifbic_len, buflen);
1206 	bzero(&breq, sizeof(breq));
1207 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1208 		if (len < sizeof(breq))
1209 			break;
1210 
1211 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1212 		    sizeof(breq.ifbr_ifsname));
1213 		/* Fill in the ifbreq structure */
1214 		error = bridge_ioctl_gifflags(sc, &breq);
1215 		if (error)
1216 			break;
1217 		memcpy(buf, &breq, sizeof(breq));
1218 		count++;
1219 		buf += sizeof(breq);
1220 		len -= sizeof(breq);
1221 	}
1222 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1223 		if (len < sizeof(breq))
1224 			break;
1225 
1226 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1227 		    sizeof(breq.ifbr_ifsname));
1228 		breq.ifbr_ifsflags = bif->bif_flags;
1229 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1230 		memcpy(buf, &breq, sizeof(breq));
1231 		count++;
1232 		buf += sizeof(breq);
1233 		len -= sizeof(breq);
1234 	}
1235 
1236 	BRIDGE_UNLOCK(sc);
1237 	bifc->ifbic_len = sizeof(breq) * count;
1238 	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1239 	BRIDGE_LOCK(sc);
1240 	free(outbuf, M_TEMP);
1241 	return (error);
1242 }
1243 
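/*
 * bridge_ioctl_rts:
 *
 *	Copy the forwarding (address cache) table out to userland.
 */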
1244 static int
1245 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1246 {
1247 	struct ifbaconf *bac = arg;
1248 	struct bridge_rtnode *brt;
1249 	struct ifbareq bareq;
1250 	char *buf, *outbuf;
1251 	int count, buflen, len, error = 0;
1252 
1253 	if (bac->ifbac_len == 0)
1254 		return (0);
1255 
1256 	count = 0;
1257 	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1258 		count++;
1259 	buflen = sizeof(bareq) * count;
1260 
1261 	BRIDGE_UNLOCK(sc);
1262 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1263 	BRIDGE_LOCK(sc);
1264 
1265 	count = 0;
1266 	buf = outbuf;
1267 	len = min(bac->ifbac_len, buflen);
1268 	bzero(&bareq, sizeof(bareq));
1269 	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1270 		if (len < sizeof(bareq))
1271 			goto out;
1272 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1273 		    sizeof(bareq.ifba_ifsname));
1274 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1275 		bareq.ifba_vlan = brt->brt_vlan;
1276 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1277 				time_uptime < brt->brt_expire)
1278 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1279 		else
1280 			bareq.ifba_expire = 0;
1281 		bareq.ifba_flags = brt->brt_flags;
1282 
1283 		memcpy(buf, &bareq, sizeof(bareq));
1284 		count++;
1285 		buf += sizeof(bareq);
1286 		len -= sizeof(bareq);
1287 	}
1288 out:
1289 	BRIDGE_UNLOCK(sc);
1290 	bac->ifbac_len = sizeof(bareq) * count;
1291 	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1292 	BRIDGE_LOCK(sc);
1293 	free(outbuf, M_TEMP);
1294 	return (error);
1295 }
1296 
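/*
 * bridge_ioctl_saddr:
 *
 *	Install a forwarding table entry (typically a static one) for the
 *	given address on the named member interface.
 */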
1297 static int
1298 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1299 {
1300 	struct ifbareq *req = arg;
1301 	struct bridge_iflist *bif;
1302 	int error;
1303 
1304 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1305 	if (bif == NULL)
1306 		return (ENOENT);
1307 
1308 	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1309 	    req->ifba_flags);
1310 
1311 	return (error);
1312 }
1313 
1314 static int
1315 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1316 {
1317 	struct ifbrparam *param = arg;
1318 
1319 	sc->sc_brttimeout = param->ifbrp_ctime;
1320 	return (0);
1321 }
1322 
1323 static int
1324 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1325 {
1326 	struct ifbrparam *param = arg;
1327 
1328 	param->ifbrp_ctime = sc->sc_brttimeout;
1329 	return (0);
1330 }
1331 
1332 static int
1333 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1334 {
1335 	struct ifbareq *req = arg;
1336 
1337 	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
1338 }
1339 
1340 static int
1341 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1342 {
1343 	struct ifbreq *req = arg;
1344 
1345 	bridge_rtflush(sc, req->ifbr_ifsflags);
1346 	return (0);
1347 }
1348 
1349 static int
1350 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1351 {
1352 	struct ifbrparam *param = arg;
1353 	struct bstp_state *bs = &sc->sc_stp;
1354 
1355 	param->ifbrp_prio = bs->bs_bridge_priority;
1356 	return (0);
1357 }
1358 
1359 static int
1360 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1361 {
1362 	struct ifbrparam *param = arg;
1363 
1364 	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1365 }
1366 
1367 static int
1368 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1369 {
1370 	struct ifbrparam *param = arg;
1371 	struct bstp_state *bs = &sc->sc_stp;
1372 
1373 	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1374 	return (0);
1375 }
1376 
1377 static int
1378 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1379 {
1380 	struct ifbrparam *param = arg;
1381 
1382 	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1383 }
1384 
1385 static int
1386 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1387 {
1388 	struct ifbrparam *param = arg;
1389 	struct bstp_state *bs = &sc->sc_stp;
1390 
1391 	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1392 	return (0);
1393 }
1394 
1395 static int
1396 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1397 {
1398 	struct ifbrparam *param = arg;
1399 
1400 	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1401 }
1402 
1403 static int
1404 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1405 {
1406 	struct ifbrparam *param = arg;
1407 	struct bstp_state *bs = &sc->sc_stp;
1408 
1409 	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1410 	return (0);
1411 }
1412 
1413 static int
1414 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1415 {
1416 	struct ifbrparam *param = arg;
1417 
1418 	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1419 }
1420 
1421 static int
1422 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1423 {
1424 	struct ifbreq *req = arg;
1425 	struct bridge_iflist *bif;
1426 
1427 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1428 	if (bif == NULL)
1429 		return (ENOENT);
1430 
1431 	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1432 }
1433 
1434 static int
1435 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1436 {
1437 	struct ifbreq *req = arg;
1438 	struct bridge_iflist *bif;
1439 
1440 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1441 	if (bif == NULL)
1442 		return (ENOENT);
1443 
1444 	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1445 }
1446 
1447 static int
1448 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1449 {
1450 	struct ifbreq *req = arg;
1451 	struct bridge_iflist *bif;
1452 
1453 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1454 	if (bif == NULL)
1455 		return (ENOENT);
1456 
1457 	bif->bif_addrmax = req->ifbr_addrmax;
1458 	return (0);
1459 }
1460 
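/*
 * bridge_ioctl_addspan:
 *
 *	Add an interface as a span port; bridge_span() transmits a copy of
 *	each frame handled by the bridge on every span port, but span ports
 *	never participate in bridging themselves.
 */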
1461 static int
1462 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1463 {
1464 	struct ifbreq *req = arg;
1465 	struct bridge_iflist *bif = NULL;
1466 	struct ifnet *ifs;
1467 
1468 	ifs = ifunit(req->ifbr_ifsname);
1469 	if (ifs == NULL)
1470 		return (ENOENT);
1471 
1472 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1473 		if (ifs == bif->bif_ifp)
1474 			return (EBUSY);
1475 
1476 	if (ifs->if_bridge != NULL)
1477 		return (EBUSY);
1478 
1479 	switch (ifs->if_type) {
1480 		case IFT_ETHER:
1481 		case IFT_GIF:
1482 		case IFT_L2VLAN:
1483 			break;
1484 		default:
1485 			return (EINVAL);
1486 	}
1487 
1488 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1489 	if (bif == NULL)
1490 		return (ENOMEM);
1491 
1492 	bif->bif_ifp = ifs;
1493 	bif->bif_flags = IFBIF_SPAN;
1494 
1495 	LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1496 
1497 	return (0);
1498 }
1499 
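/*
 * bridge_ioctl_delspan:
 *
 *	Remove the named interface from the span port list.
 */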
1500 static int
1501 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1502 {
1503 	struct ifbreq *req = arg;
1504 	struct bridge_iflist *bif;
1505 	struct ifnet *ifs;
1506 
1507 	ifs = ifunit(req->ifbr_ifsname);
1508 	if (ifs == NULL)
1509 		return (ENOENT);
1510 
1511 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1512 		if (ifs == bif->bif_ifp)
1513 			break;
1514 
1515 	if (bif == NULL)
1516 		return (ENOENT);
1517 
1518 	bridge_delete_span(sc, bif);
1519 
1520 	return (0);
1521 }
1522 
1523 static int
1524 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1525 {
1526 	struct ifbropreq *req = arg;
1527 	struct bstp_state *bs = &sc->sc_stp;
1528 	struct bstp_port *root_port;
1529 
1530 	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1531 	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1532 	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1533 
1534 	root_port = bs->bs_root_port;
1535 	if (root_port == NULL)
1536 		req->ifbop_root_port = 0;
1537 	else
1538 		req->ifbop_root_port = root_port->bp_ifp->if_index;
1539 
1540 	req->ifbop_holdcount = bs->bs_txholdcount;
1541 	req->ifbop_priority = bs->bs_bridge_priority;
1542 	req->ifbop_protocol = bs->bs_protover;
1543 	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1544 	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1545 	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1546 	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1547 	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1548 	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1549 
1550 	return (0);
1551 }
1552 
1553 static int
1554 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1555 {
1556 	struct ifbrparam *param = arg;
1557 
1558 	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1559 	return (0);
1560 }
1561 
1562 static int
1563 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1564 {
1565 	struct ifbpstpconf *bifstp = arg;
1566 	struct bridge_iflist *bif;
1567 	struct bstp_port *bp;
1568 	struct ifbpstpreq bpreq;
1569 	char *buf, *outbuf;
1570 	int count, buflen, len, error = 0;
1571 
1572 	count = 0;
1573 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1574 		if ((bif->bif_flags & IFBIF_STP) != 0)
1575 			count++;
1576 	}
1577 
1578 	buflen = sizeof(bpreq) * count;
1579 	if (bifstp->ifbpstp_len == 0) {
1580 		bifstp->ifbpstp_len = buflen;
1581 		return (0);
1582 	}
1583 
1584 	BRIDGE_UNLOCK(sc);
1585 	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
1586 	BRIDGE_LOCK(sc);
1587 
1588 	count = 0;
1589 	buf = outbuf;
1590 	len = min(bifstp->ifbpstp_len, buflen);
1591 	bzero(&bpreq, sizeof(bpreq));
1592 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1593 		if (len < sizeof(bpreq))
1594 			break;
1595 
1596 		if ((bif->bif_flags & IFBIF_STP) == 0)
1597 			continue;
1598 
1599 		bp = &bif->bif_stp;
1600 		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1601 		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1602 		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1603 		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1604 		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1605 		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1606 
1607 		memcpy(buf, &bpreq, sizeof(bpreq));
1608 		count++;
1609 		buf += sizeof(bpreq);
1610 		len -= sizeof(bpreq);
1611 	}
1612 
1613 	BRIDGE_UNLOCK(sc);
1614 	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1615 	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1616 	BRIDGE_LOCK(sc);
1617 	free(outbuf, M_TEMP);
1618 	return (error);
1619 }
1620 
1621 static int
1622 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
1623 {
1624 	struct ifbrparam *param = arg;
1625 
1626 	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
1627 }
1628 
1629 static int
1630 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
1631 {
1632 	struct ifbrparam *param = arg;
1633 
1634 	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
1635 }
1636 
1637 /*
1638  * bridge_ifdetach:
1639  *
1640  *	Detach an interface from a bridge.  Called when a member
1641  *	interface is detaching.
1642  */
1643 static void
1644 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1645 {
1646 	struct bridge_softc *sc = ifp->if_bridge;
1647 	struct bridge_iflist *bif;
1648 
1649 	/* Check if the interface is a bridge member */
1650 	if (sc != NULL) {
1651 		BRIDGE_LOCK(sc);
1652 
1653 		bif = bridge_lookup_member_if(sc, ifp);
1654 		if (bif != NULL)
1655 			bridge_delete_member(sc, bif, 1);
1656 
1657 		BRIDGE_UNLOCK(sc);
1658 		return;
1659 	}
1660 
1661 	/* Check if the interface is a span port */
1662 	mtx_lock(&bridge_list_mtx);
1663 	LIST_FOREACH(sc, &bridge_list, sc_list) {
1664 		BRIDGE_LOCK(sc);
1665 		LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1666 			if (ifp == bif->bif_ifp) {
1667 				bridge_delete_span(sc, bif);
1668 				break;
1669 			}
1670 
1671 		BRIDGE_UNLOCK(sc);
1672 	}
1673 	mtx_unlock(&bridge_list_mtx);
1674 }
1675 
1676 /*
1677  * bridge_init:
1678  *
1679  *	Initialize a bridge interface.
1680  */
1681 static void
1682 bridge_init(void *xsc)
1683 {
1684 	struct bridge_softc *sc = (struct bridge_softc *)xsc;
1685 	struct ifnet *ifp = sc->sc_ifp;
1686 
1687 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1688 		return;
1689 
1690 	BRIDGE_LOCK(sc);
1691 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1692 	    bridge_timer, sc);
1693 
1694 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1695 	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
1696 
1697 	BRIDGE_UNLOCK(sc);
1698 }
1699 
1700 /*
1701  * bridge_stop:
1702  *
1703  *	Stop the bridge interface.
1704  */
1705 static void
1706 bridge_stop(struct ifnet *ifp, int disable)
1707 {
1708 	struct bridge_softc *sc = ifp->if_softc;
1709 
1710 	BRIDGE_LOCK_ASSERT(sc);
1711 
1712 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1713 		return;
1714 
1715 	callout_stop(&sc->sc_brcallout);
1716 	bstp_stop(&sc->sc_stp);
1717 
1718 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1719 
1720 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1721 }
1722 
1723 /*
1724  * bridge_enqueue:
1725  *
1726  *	Enqueue a packet on a bridge member interface.
1727  *
1728  */
1729 static void
1730 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
1731 {
1732 	int len, err = 0;
1733 	short mflags;
1734 	struct mbuf *m0;
1735 
1736 	len = m->m_pkthdr.len;
1737 	mflags = m->m_flags;
1738 
1739 	/* We may be sending a fragment so traverse the mbuf chain */
1740 	for (; m; m = m0) {
1741 		m0 = m->m_nextpkt;
1742 		m->m_nextpkt = NULL;
1743 
1744 		/*
1745 		 * If the underlying interface cannot do VLAN tag insertion
1746 		 * itself then attach a packet tag that holds it.
1747 		 */
1748 		if ((m->m_flags & M_VLANTAG) &&
1749 		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
1750 			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
1751 			if (m == NULL) {
1752 				if_printf(dst_ifp,
1753 				    "unable to prepend VLAN header\n");
1754 				dst_ifp->if_oerrors++;
1755 				continue;
1756 			}
1757 			m->m_flags &= ~M_VLANTAG;
1758 		}
1759 
1760 		if (err == 0)
1761 			IFQ_ENQUEUE(&dst_ifp->if_snd, m, err);
1762 	}
1763 
1764 	if (err == 0) {
1765 
1766 		sc->sc_ifp->if_opackets++;
1767 		sc->sc_ifp->if_obytes += len;
1768 
1769 		dst_ifp->if_obytes += len;
1770 
1771 		if (mflags & M_MCAST) {
1772 			sc->sc_ifp->if_omcasts++;
1773 			dst_ifp->if_omcasts++;
1774 		}
1775 	}
1776 
1777 	if ((dst_ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
1778 		(*dst_ifp->if_start)(dst_ifp);
1779 }
1780 
1781 /*
1782  * bridge_dummynet:
1783  *
1784  * 	Receive a queued packet from dummynet and pass it on to the output
1785  * 	interface.
1786  *
1787  *	The mbuf has the Ethernet header already attached.
1788  */
1789 static void
1790 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
1791 {
1792 	struct bridge_softc *sc;
1793 
1794 	sc = ifp->if_bridge;
1795 
1796 	/*
1797 	 * The packet didn't originate from a member interface. This should only
1798 	 * ever happen if a member interface is removed while packets are
1799 	 * queued for it.
1800 	 */
1801 	if (sc == NULL) {
1802 		m_freem(m);
1803 		return;
1804 	}
1805 
1806 	if (PFIL_HOOKED(&inet_pfil_hook)
1807 #ifdef INET6
1808 	    || PFIL_HOOKED(&inet6_pfil_hook)
1809 #endif
1810 	    ) {
1811 		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
1812 			return;
1813 		if (m == NULL)
1814 			return;
1815 	}
1816 
1817 	bridge_enqueue(sc, ifp, m);
1818 }
1819 
1820 /*
1821  * bridge_output:
1822  *
1823  *	Send output from a bridge member interface.  This
1824  *	performs the bridging function for locally originated
1825  *	packets.
1826  *
1827  *	The mbuf has the Ethernet header already attached.  We must
1828  *	enqueue or free the mbuf before returning.
1829  */
1830 static int
1831 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
1832     struct rtentry *rt)
1833 {
1834 	struct ether_header *eh;
1835 	struct ifnet *dst_if;
1836 	struct bridge_softc *sc;
1837 	uint16_t vlan;
1838 
1839 	if (m->m_len < ETHER_HDR_LEN) {
1840 		m = m_pullup(m, ETHER_HDR_LEN);
1841 		if (m == NULL)
1842 			return (0);
1843 	}
1844 
1845 	eh = mtod(m, struct ether_header *);
1846 	sc = ifp->if_bridge;
1847 	vlan = VLANTAGOF(m);
1848 
1849 	BRIDGE_LOCK(sc);
1850 
1851 	/*
1852 	 * If bridge is down, but the original output interface is up,
1853 	 * go ahead and send out that interface.  Otherwise, the packet
1854 	 * is dropped below.
1855 	 */
1856 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1857 		dst_if = ifp;
1858 		goto sendunicast;
1859 	}
1860 
1861 	/*
1862 	 * If the packet is a multicast, or we don't know a better way to
1863 	 * get there, send to all interfaces.
1864 	 */
1865 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
1866 		dst_if = NULL;
1867 	else
1868 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
1869 	if (dst_if == NULL) {
1870 		struct bridge_iflist *bif;
1871 		struct mbuf *mc;
1872 		int error = 0, used = 0;
1873 
1874 		bridge_span(sc, m);
1875 
1876 		BRIDGE_LOCK2REF(sc, error);
1877 		if (error) {
1878 			m_freem(m);
1879 			return (0);
1880 		}
1881 
1882 		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1883 			dst_if = bif->bif_ifp;
1884 
1885 			if (dst_if->if_type == IFT_GIF)
1886 				continue;
1887 			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
1888 				continue;
1889 
1890 			/*
1891 			 * If this is not the original output interface,
1892 			 * and the interface is participating in spanning
1893 			 * tree, make sure the port is in a state that
1894 			 * allows forwarding.
1895 			 */
1896 			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
1897 			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
1898 				continue;
1899 
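			/*
			 * The last member gets the original mbuf; all other
			 * members get a copy.
			 */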
1900 			if (LIST_NEXT(bif, bif_next) == NULL) {
1901 				used = 1;
1902 				mc = m;
1903 			} else {
1904 				mc = m_copypacket(m, M_DONTWAIT);
1905 				if (mc == NULL) {
1906 					sc->sc_ifp->if_oerrors++;
1907 					continue;
1908 				}
1909 			}
1910 
1911 			bridge_enqueue(sc, dst_if, mc);
1912 		}
1913 		if (used == 0)
1914 			m_freem(m);
1915 		BRIDGE_UNREF(sc);
1916 		return (0);
1917 	}
1918 
1919 sendunicast:
1920 	/*
1921 	 * XXX Spanning tree consideration here?
1922 	 */
1923 
1924 	bridge_span(sc, m);
1925 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1926 		m_freem(m);
1927 		BRIDGE_UNLOCK(sc);
1928 		return (0);
1929 	}
1930 
1931 	BRIDGE_UNLOCK(sc);
1932 	bridge_enqueue(sc, dst_if, m);
1933 	return (0);
1934 }
1935 
1936 /*
1937  * bridge_start:
1938  *
1939  *	Start output on a bridge.
1940  *
1941  */
1942 static void
1943 bridge_start(struct ifnet *ifp)
1944 {
1945 	struct bridge_softc *sc;
1946 	struct mbuf *m;
1947 	struct ether_header *eh;
1948 	struct ifnet *dst_if;
1949 
1950 	sc = ifp->if_softc;
1951 
1952 	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1953 	for (;;) {
1954 		IFQ_DEQUEUE(&ifp->if_snd, m);
1955 		if (m == NULL)
1956 			break;
1957 		ETHER_BPF_MTAP(ifp, m);
1958 
1959 		eh = mtod(m, struct ether_header *);
1960 		dst_if = NULL;
1961 
1962 		BRIDGE_LOCK(sc);
1963 		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1964 			dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1);
1965 		}
1966 
1967 		if (dst_if == NULL)
1968 			bridge_broadcast(sc, ifp, m, 0);
1969 		else {
1970 			BRIDGE_UNLOCK(sc);
1971 			bridge_enqueue(sc, dst_if, m);
1972 		}
1973 	}
1974 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1975 }
1976 
1977 /*
1978  * bridge_forward:
1979  *
1980  *	The forwarding function of the bridge.
1981  *
1982  *	NOTE: Releases the lock on return.
1983  */
1984 static void
1985 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
1986     struct mbuf *m)
1987 {
1988 	struct bridge_iflist *dbif;
1989 	struct ifnet *src_if, *dst_if, *ifp;
1990 	struct ether_header *eh;
1991 	uint16_t vlan;
1992 	uint8_t *dst;
1993 	int error;
1994 
1995 	src_if = m->m_pkthdr.rcvif;
1996 	ifp = sc->sc_ifp;
1997 
1998 	ifp->if_ipackets++;
1999 	ifp->if_ibytes += m->m_pkthdr.len;
2000 	vlan = VLANTAGOF(m);
2001 
2002 	if ((sbif->bif_flags & IFBIF_STP) &&
2003 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2004 		goto drop;
2005 
2006 	eh = mtod(m, struct ether_header *);
2007 	dst = eh->ether_dhost;
2008 
2009 	/* If the interface is learning, record the address. */
2010 	if (sbif->bif_flags & IFBIF_LEARNING) {
2011 		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2012 		    sbif, 0, IFBAF_DYNAMIC);
2013 		/*
2014 		 * If the interface has an address limit then deny any source
2015 		 * that is not in the cache.
2016 		 */
2017 		if (error && sbif->bif_addrmax)
2018 			goto drop;
2019 	}
2020 
2021 	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2022 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2023 		goto drop;
2024 
2025 	/*
2026 	 * At this point, the port either doesn't participate
2027 	 * in spanning tree or it is in the forwarding state.
2028 	 */
2029 
2030 	/*
2031 	 * If the packet is unicast, destined for someone on
2032 	 * "this" side of the bridge, drop it.
2033 	 */
2034 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2035 		dst_if = bridge_rtlookup(sc, dst, vlan);
2036 		if (src_if == dst_if)
2037 			goto drop;
2038 	} else {
2039 		/*
2040 		 * Check whether it is a reserved multicast address; any address
2041 		 * listed in 802.1D section 7.12.6 may not be forwarded by the
2042 		 * bridge.
2043 		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F.
2044 		 */
2045 		if (dst[0] == 0x01 && dst[1] == 0x80 &&
2046 		    dst[2] == 0xc2 && dst[3] == 0x00 &&
2047 		    dst[4] == 0x00 && dst[5] <= 0x0f)
2048 			goto drop;
2049 
2050 		/* ...forward it to all interfaces. */
2051 		ifp->if_imcasts++;
2052 		dst_if = NULL;
2053 	}
2054 
2055 	/*
2056 	 * If we have a destination interface that is a member of our bridge,
2057 	 * OR this is a unicast packet, push it through the bpf(4) machinery.
2058 	 * For broadcast or multicast packets, don't bother because they will
2059 	 * be reinjected into ether_input.  We do this before we pass the
2060 	 * packet through the pfil(9) framework, as pfil(9) may drop or
2061 	 * modify the packet, making it difficult to debug firewall issues
2062 	 * on the bridge.
2063 	 */
2064 	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2065 		ETHER_BPF_MTAP(ifp, m);
2066 
2067 	/* run the packet filter */
2068 	if (PFIL_HOOKED(&inet_pfil_hook)
2069 #ifdef INET6
2070 	    || PFIL_HOOKED(&inet6_pfil_hook)
2071 #endif
2072 	    ) {
2073 		BRIDGE_UNLOCK(sc);
2074 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2075 			return;
2076 		if (m == NULL)
2077 			return;
2078 		BRIDGE_LOCK(sc);
2079 	}
2080 
2081 	if (dst_if == NULL) {
2082 		bridge_broadcast(sc, src_if, m, 1);
2083 		return;
2084 	}
2085 
2086 	/*
2087 	 * At this point, we're dealing with a unicast frame
2088 	 * going to a different interface.
2089 	 */
2090 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2091 		goto drop;
2092 
2093 	dbif = bridge_lookup_member_if(sc, dst_if);
2094 	if (dbif == NULL)
2095 		/* Not a member of the bridge (anymore?) */
2096 		goto drop;
2097 
2098 	/* Private segments cannot talk to each other */
2099 	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2100 		goto drop;
2101 
2102 	if ((dbif->bif_flags & IFBIF_STP) &&
2103 	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2104 		goto drop;
2105 
2106 	BRIDGE_UNLOCK(sc);
2107 
2108 	if (PFIL_HOOKED(&inet_pfil_hook)
2109 #ifdef INET6
2110 	    || PFIL_HOOKED(&inet6_pfil_hook)
2111 #endif
2112 	    ) {
2113 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2114 			return;
2115 		if (m == NULL)
2116 			return;
2117 	}
2118 
2119 	bridge_enqueue(sc, dst_if, m);
2120 	return;
2121 
2122 drop:
2123 	BRIDGE_UNLOCK(sc);
2124 	m_freem(m);
2125 }
2126 
2127 /*
2128  * bridge_input:
2129  *
2130  *	Receive input from a member interface.  Queue the packet for
2131  *	bridging if it is not for us.
2132  */
2133 static struct mbuf *
2134 bridge_input(struct ifnet *ifp, struct mbuf *m)
2135 {
2136 	struct bridge_softc *sc = ifp->if_bridge;
2137 	struct bridge_iflist *bif, *bif2;
2138 	struct ifnet *bifp;
2139 	struct ether_header *eh;
2140 	struct mbuf *mc, *mc2;
2141 	uint16_t vlan;
2142 	int error;
2143 
2144 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2145 		return (m);
2146 
2147 	bifp = sc->sc_ifp;
2148 	vlan = VLANTAGOF(m);
2149 
2150 	/*
2151 	 * Implement support for bridge monitoring. If this flag has been
2152 	 * set on this interface, discard the packet once we push it through
2153 	 * the bpf(4) machinery, but before we do, increment the byte and
2154 	 * packet counters associated with this interface.
2155 	 */
2156 	if ((bifp->if_flags & IFF_MONITOR) != 0) {
2157 		m->m_pkthdr.rcvif  = bifp;
2158 		ETHER_BPF_MTAP(bifp, m);
2159 		bifp->if_ipackets++;
2160 		bifp->if_ibytes += m->m_pkthdr.len;
2161 		m_freem(m);
2162 		return (NULL);
2163 	}
2164 	BRIDGE_LOCK(sc);
2165 	bif = bridge_lookup_member_if(sc, ifp);
2166 	if (bif == NULL) {
2167 		BRIDGE_UNLOCK(sc);
2168 		return (m);
2169 	}
2170 
2171 	eh = mtod(m, struct ether_header *);
2172 
2173 	bridge_span(sc, m);
2174 
2175 	if (m->m_flags & (M_BCAST|M_MCAST)) {
2176 		/* Tap off 802.1D packets; they do not get forwarded. */
2177 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2178 		    ETHER_ADDR_LEN) == 0) {
2179 			m = bstp_input(&bif->bif_stp, ifp, m);
2180 			if (m == NULL) {
2181 				BRIDGE_UNLOCK(sc);
2182 				return (NULL);
2183 			}
2184 		}
2185 
2186 		if ((bif->bif_flags & IFBIF_STP) &&
2187 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2188 			BRIDGE_UNLOCK(sc);
2189 			return (m);
2190 		}
2191 
2192 		/*
2193 		 * Make a deep copy of the packet and enqueue the copy
2194 		 * for bridge processing; return the original packet for
2195 		 * local processing.
2196 		 */
2197 		mc = m_dup(m, M_DONTWAIT);
2198 		if (mc == NULL) {
2199 			BRIDGE_UNLOCK(sc);
2200 			return (m);
2201 		}
2202 
2203 		/* Perform the bridge forwarding function with the copy. */
2204 		bridge_forward(sc, bif, mc);
2205 
2206 		/*
2207 		 * Reinject the mbuf as arriving on the bridge so we have a
2208 		 * chance at claiming multicast packets. We cannot loop back
2209 		 * here from ether_input as a bridge is never a member of a
2210 		 * bridge.
2211 		 */
2212 		KASSERT(bifp->if_bridge == NULL,
2213 		    ("loop created in bridge_input"));
2214 		mc2 = m_dup(m, M_DONTWAIT);
2215 		if (mc2 != NULL) {
2216 			/* Keep the layer3 header aligned */
2217 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2218 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2219 		}
2220 		if (mc2 != NULL) {
2221 			mc2->m_pkthdr.rcvif = bifp;
2222 			(*bifp->if_input)(bifp, mc2);
2223 		}
2224 
2225 		/* Return the original packet for local processing. */
2226 		return (m);
2227 	}
2228 
2229 	if ((bif->bif_flags & IFBIF_STP) &&
2230 	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2231 		BRIDGE_UNLOCK(sc);
2232 		return (m);
2233 	}
2234 
2235 #ifdef DEV_CARP
2236 #   define OR_CARP_CHECK_WE_ARE_DST(iface) \
2237 	|| ((iface)->if_carp \
2238 	    && carp_forus((iface)->if_carp, eh->ether_dhost))
2239 #   define OR_CARP_CHECK_WE_ARE_SRC(iface) \
2240 	|| ((iface)->if_carp \
2241 	    && carp_forus((iface)->if_carp, eh->ether_shost))
2242 #else
2243 #   define OR_CARP_CHECK_WE_ARE_DST(iface)
2244 #   define OR_CARP_CHECK_WE_ARE_SRC(iface)
2245 #endif
2246 
2247 #ifdef INET6
2248 #   define OR_PFIL_HOOKED_INET6 \
2249 	|| PFIL_HOOKED(&inet6_pfil_hook)
2250 #else
2251 #   define OR_PFIL_HOOKED_INET6
2252 #endif
2253 
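/*
 * GRAB_OUR_PACKETS(iface) checks a single candidate interface: if the
 * destination address (or a carp address on that interface) matches, the
 * frame is delivered locally; if the source address matches, the frame is
 * a looped-back copy of our own output and is dropped.
 */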
2254 #define GRAB_OUR_PACKETS(iface) \
2255 	if ((iface)->if_type == IFT_GIF) \
2256 		continue; \
2257 	/* It is destined for us. */ \
2258 	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost,  ETHER_ADDR_LEN) == 0 \
2259 	    OR_CARP_CHECK_WE_ARE_DST((iface))				\
2260 	    ) {								\
2261 		if ((iface)->if_type == IFT_BRIDGE) {			\
2262 			ETHER_BPF_MTAP(iface, m);			\
2263 			iface->if_ipackets++;				\
2264 			/* Filter on the physical interface. */		\
2265 			if (pfil_local_phys &&				\
2266 			    (PFIL_HOOKED(&inet_pfil_hook)		\
2267 			     OR_PFIL_HOOKED_INET6)) {			\
2268 				if (bridge_pfil(&m, NULL, ifp,		\
2269 				    PFIL_IN) != 0 || m == NULL) {	\
2270 					BRIDGE_UNLOCK(sc);		\
2271 					return (NULL);			\
2272 				}					\
2273 			}						\
2274 		}							\
2275 		if (bif->bif_flags & IFBIF_LEARNING) {			\
2276 			error = bridge_rtupdate(sc, eh->ether_shost,	\
2277 			    vlan, bif, 0, IFBAF_DYNAMIC);		\
2278 			if (error && bif->bif_addrmax) {		\
2279 				BRIDGE_UNLOCK(sc);			\
2280 				m_freem(m);				\
2281 				return (NULL);				\
2282 			}						\
2283 		}							\
2284 		m->m_pkthdr.rcvif = iface;				\
2285 		BRIDGE_UNLOCK(sc);					\
2286 		return (m);						\
2287 	}								\
2288 									\
2289 	/* We just received a packet that we sent out. */		\
2290 	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
2291 	    OR_CARP_CHECK_WE_ARE_SRC((iface))			\
2292 	    ) {								\
2293 		BRIDGE_UNLOCK(sc);					\
2294 		m_freem(m);						\
2295 		return (NULL);						\
2296 	}
2297 
2298 	/*
2299 	 * Unicast.  Make sure it's not for the bridge.
2300 	 */
2301 	do { GRAB_OUR_PACKETS(bifp) } while (0);
2302 
2303 	/*
2304 	 * Give ifp a chance at first priority.  This helps when the packet
2305 	 * arrives through an interface, such as a vlan(4), that shares a MAC
2306 	 * address with other interfaces on the same bridge.  It also saves
2307 	 * some CPU cycles when the destination interface and the input
2308 	 * interface (i.e. ifp) are the same.
2309 	 */
2310 	do { GRAB_OUR_PACKETS(ifp) } while (0);
2311 
2312 	/* Now check all the bridge members. */
2313 	LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2314 		GRAB_OUR_PACKETS(bif2->bif_ifp)
2315 	}
2316 
2317 #undef OR_CARP_CHECK_WE_ARE_DST
2318 #undef OR_CARP_CHECK_WE_ARE_SRC
2319 #undef OR_PFIL_HOOKED_INET6
2320 #undef GRAB_OUR_PACKETS
2321 
2322 	/* Perform the bridge forwarding function. */
2323 	bridge_forward(sc, bif, m);
2324 
2325 	return (NULL);
2326 }
2327 
2328 /*
2329  * bridge_broadcast:
2330  *
2331  *	Send a frame to all interfaces that are members of
2332  *	the bridge, except for the one on which the packet
2333  *	arrived.
2334  *
2335  *	NOTE: Releases the lock on return.
2336  */
2337 static void
2338 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2339     struct mbuf *m, int runfilt)
2340 {
2341 	struct bridge_iflist *dbif, *sbif;
2342 	struct mbuf *mc;
2343 	struct ifnet *dst_if;
2344 	int error = 0, used = 0, i;
2345 
2346 	sbif = bridge_lookup_member_if(sc, src_if);
2347 
2348 	BRIDGE_LOCK2REF(sc, error);
2349 	if (error) {
2350 		m_freem(m);
2351 		return;
2352 	}
2353 
2354 	/* Filter on the bridge interface before broadcasting */
2355 	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
2356 #ifdef INET6
2357 	    || PFIL_HOOKED(&inet6_pfil_hook)
2358 #endif
2359 	    )) {
2360 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2361 			goto out;
2362 		if (m == NULL)
2363 			goto out;
2364 	}
2365 
2366 	LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2367 		dst_if = dbif->bif_ifp;
2368 		if (dst_if == src_if)
2369 			continue;
2370 
2371 		/* Private segments cannot talk to each other */
2372 		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2373 			continue;
2374 
2375 		if ((dbif->bif_flags & IFBIF_STP) &&
2376 		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2377 			continue;
2378 
2379 		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2380 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2381 			continue;
2382 
2383 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2384 			continue;
2385 
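		/*
		 * As in bridge_output(), the last member gets the original
		 * mbuf and everyone else gets a duplicate.
		 */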
2386 		if (LIST_NEXT(dbif, bif_next) == NULL) {
2387 			mc = m;
2388 			used = 1;
2389 		} else {
2390 			mc = m_dup(m, M_DONTWAIT);
2391 			if (mc == NULL) {
2392 				sc->sc_ifp->if_oerrors++;
2393 				continue;
2394 			}
2395 		}
2396 
2397 		/*
2398 		 * Filter on the output interface. Pass a NULL bridge interface
2399 		 * pointer so we do not redundantly filter on the bridge for
2400 		 * each interface we broadcast on.
2401 		 */
2402 		if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
2403 #ifdef INET6
2404 		    || PFIL_HOOKED(&inet6_pfil_hook)
2405 #endif
2406 		    )) {
2407 			if (used == 0) {
2408 				/* Keep the layer3 header aligned */
2409 				i = min(mc->m_pkthdr.len, max_protohdr);
2410 				mc = m_copyup(mc, i, ETHER_ALIGN);
2411 				if (mc == NULL) {
2412 					sc->sc_ifp->if_oerrors++;
2413 					continue;
2414 				}
2415 			}
2416 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2417 				continue;
2418 			if (mc == NULL)
2419 				continue;
2420 		}
2421 
2422 		bridge_enqueue(sc, dst_if, mc);
2423 	}
2424 	if (used == 0)
2425 		m_freem(m);
2426 
2427 out:
2428 	BRIDGE_UNREF(sc);
2429 }
2430 
2431 /*
2432  * bridge_span:
2433  *
2434  *	Duplicate a packet out one or more interfaces that are in span mode,
2435  *	Duplicate a packet out one or more interfaces that are in span mode;
2436  *	the original mbuf is left unmodified.
2437 static void
2438 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2439 {
2440 	struct bridge_iflist *bif;
2441 	struct ifnet *dst_if;
2442 	struct mbuf *mc;
2443 
2444 	if (LIST_EMPTY(&sc->sc_spanlist))
2445 		return;
2446 
2447 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2448 		dst_if = bif->bif_ifp;
2449 
2450 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2451 			continue;
2452 
2453 		mc = m_copypacket(m, M_DONTWAIT);
2454 		if (mc == NULL) {
2455 			sc->sc_ifp->if_oerrors++;
2456 			continue;
2457 		}
2458 
2459 		bridge_enqueue(sc, dst_if, mc);
2460 	}
2461 }
2462 
2463 /*
2464  * bridge_rtupdate:
2465  *
2466  *	Add a bridge routing entry.
2467  */
2468 static int
2469 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
2470     struct bridge_iflist *bif, int setflags, uint8_t flags)
2471 {
2472 	struct bridge_rtnode *brt;
2473 	int error;
2474 
2475 	BRIDGE_LOCK_ASSERT(sc);
2476 
2477 	/* Check that the source address is valid and not multicast. */
2478 	if (ETHER_IS_MULTICAST(dst) ||
2479 	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
2480 	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
2481 		return (EINVAL);
2482 
2483 	/* 802.1p frames map to vlan 1 */
2484 	if (vlan == 0)
2485 		vlan = 1;
2486 
2487 	/*
2488 	 * A route for this destination might already exist.  If so,
2489 	 * update it, otherwise create a new one.
2490 	 */
2491 	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2492 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2493 			sc->sc_brtexceeded++;
2494 			return (ENOSPC);
2495 		}
2496 		/* Check per interface address limits (if enabled) */
2497 		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2498 			bif->bif_addrexceeded++;
2499 			return (ENOSPC);
2500 		}
2501 
2502 		/*
2503 		 * Allocate a new bridge forwarding node, and
2504 		 * initialize the expiration time and Ethernet
2505 		 * address.
2506 		 */
2507 		brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2508 		if (brt == NULL)
2509 			return (ENOMEM);
2510 
2511 		if (bif->bif_flags & IFBIF_STICKY)
2512 			brt->brt_flags = IFBAF_STICKY;
2513 		else
2514 			brt->brt_flags = IFBAF_DYNAMIC;
2515 
2516 		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2517 		brt->brt_vlan = vlan;
2518 
2519 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2520 			uma_zfree(bridge_rtnode_zone, brt);
2521 			return (error);
2522 		}
2523 		brt->brt_dst = bif;
2524 		bif->bif_addrcnt++;
2525 	}
2526 
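	/*
	 * If a dynamic entry has moved to another member port, repoint it
	 * and fix up the per-interface address counts.
	 */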
2527 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2528 	    brt->brt_dst != bif) {
2529 		brt->brt_dst->bif_addrcnt--;
2530 		brt->brt_dst = bif;
2531 		brt->brt_dst->bif_addrcnt++;
2532 	}
2533 
2534 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2535 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
2536 	if (setflags)
2537 		brt->brt_flags = flags;
2538 
2539 	return (0);
2540 }
2541 
2542 /*
2543  * bridge_rtlookup:
2544  *
2545  *	Lookup the destination interface for an address.
2546  */
2547 static struct ifnet *
2548 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2549 {
2550 	struct bridge_rtnode *brt;
2551 
2552 	BRIDGE_LOCK_ASSERT(sc);
2553 
2554 	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
2555 		return (NULL);
2556 
2557 	return (brt->brt_ifp);
2558 }
2559 
2560 /*
2561  * bridge_rttrim:
2562  *
2563  *	Trim the routing table so that we have a number
2564  *	of routing entries less than or equal to the
2565  *	maximum number.
2566  */
2567 static void
2568 bridge_rttrim(struct bridge_softc *sc)
2569 {
2570 	struct bridge_rtnode *brt, *nbrt;
2571 
2572 	BRIDGE_LOCK_ASSERT(sc);
2573 
2574 	/* Make sure we actually need to do this. */
2575 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2576 		return;
2577 
2578 	/* Force an aging cycle; this might trim enough addresses. */
2579 	bridge_rtage(sc);
2580 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2581 		return;
2582 
2583 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2584 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2585 			bridge_rtnode_destroy(sc, brt);
2586 			if (sc->sc_brtcnt <= sc->sc_brtmax)
2587 				return;
2588 		}
2589 	}
2590 }
2591 
2592 /*
2593  * bridge_timer:
2594  *
2595  *	Aging timer for the bridge.
2596  */
2597 static void
2598 bridge_timer(void *arg)
2599 {
2600 	struct bridge_softc *sc = arg;
2601 
2602 	BRIDGE_LOCK_ASSERT(sc);
2603 
2604 	bridge_rtage(sc);
2605 
2606 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
2607 		callout_reset(&sc->sc_brcallout,
2608 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2609 }
2610 
2611 /*
2612  * bridge_rtage:
2613  *
2614  *	Perform an aging cycle.
2615  */
2616 static void
2617 bridge_rtage(struct bridge_softc *sc)
2618 {
2619 	struct bridge_rtnode *brt, *nbrt;
2620 
2621 	BRIDGE_LOCK_ASSERT(sc);
2622 
2623 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2624 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2625 			if (time_uptime >= brt->brt_expire)
2626 				bridge_rtnode_destroy(sc, brt);
2627 		}
2628 	}
2629 }
2630 
2631 /*
2632  * bridge_rtflush:
2633  *
2634  *	Remove all dynamic addresses from the bridge.
2635  */
2636 static void
2637 bridge_rtflush(struct bridge_softc *sc, int full)
2638 {
2639 	struct bridge_rtnode *brt, *nbrt;
2640 
2641 	BRIDGE_LOCK_ASSERT(sc);
2642 
2643 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2644 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2645 			bridge_rtnode_destroy(sc, brt);
2646 	}
2647 }
2648 
2649 /*
2650  * bridge_rtdaddr:
2651  *
2652  *	Remove an address from the table.
2653  */
2654 static int
2655 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2656 {
2657 	struct bridge_rtnode *brt;
2658 	int found = 0;
2659 
2660 	BRIDGE_LOCK_ASSERT(sc);
2661 
2662 	/*
2663 	 * If vlan is zero then we want to delete for all vlans, so the lookup
2664 	 * may return more than one.
2665 	 */
2666 	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
2667 		bridge_rtnode_destroy(sc, brt);
2668 		found = 1;
2669 	}
2670 
2671 	return (found ? 0 : ENOENT);
2672 }
2673 
2674 /*
2675  * bridge_rtdelete:
2676  *
2677  *	Delete routes to a specific member interface.
2678  */
2679 static void
2680 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2681 {
2682 	struct bridge_rtnode *brt, *nbrt;
2683 
2684 	BRIDGE_LOCK_ASSERT(sc);
2685 
2686 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2687 		if (brt->brt_ifp == ifp && (full ||
2688 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2689 			bridge_rtnode_destroy(sc, brt);
2690 	}
2691 }
2692 
2693 /*
2694  * bridge_rtable_init:
2695  *
2696  *	Initialize the route table for this bridge.
2697  */
2698 static int
2699 bridge_rtable_init(struct bridge_softc *sc)
2700 {
2701 	int i;
2702 
2703 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2704 	    M_DEVBUF, M_NOWAIT);
2705 	if (sc->sc_rthash == NULL)
2706 		return (ENOMEM);
2707 
2708 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2709 		LIST_INIT(&sc->sc_rthash[i]);
2710 
2711 	sc->sc_rthash_key = arc4random();
2712 
2713 	LIST_INIT(&sc->sc_rtlist);
2714 
2715 	return (0);
2716 }
2717 
2718 /*
2719  * bridge_rtable_fini:
2720  *
2721  *	Deconstruct the route table for this bridge.
2722  */
2723 static void
2724 bridge_rtable_fini(struct bridge_softc *sc)
2725 {
2726 
2727 	KASSERT(sc->sc_brtcnt == 0,
2728 	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
2729 	free(sc->sc_rthash, M_DEVBUF);
2730 }
2731 
2732 /*
2733  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2734  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
2735  */
2736 #define	mix(a, b, c)							\
2737 do {									\
2738 	a -= b; a -= c; a ^= (c >> 13);					\
2739 	b -= c; b -= a; b ^= (a << 8);					\
2740 	c -= a; c -= b; c ^= (b >> 13);					\
2741 	a -= b; a -= c; a ^= (c >> 12);					\
2742 	b -= c; b -= a; b ^= (a << 16);					\
2743 	c -= a; c -= b; c ^= (b >> 5);					\
2744 	a -= b; a -= c; a ^= (c >> 3);					\
2745 	b -= c; b -= a; b ^= (a << 10);					\
2746 	c -= a; c -= b; c ^= (b >> 15);					\
2747 } while (/*CONSTCOND*/0)
2748 
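/*
 * bridge_rthash:
 *
 *	Hash an Ethernet address into the route table: fold the six address
 *	bytes into the a/b words, mix with the per-bridge random key in c,
 *	and mask the result down to the table size.
 */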
2749 static __inline uint32_t
2750 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2751 {
2752 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2753 
2754 	b += addr[5] << 8;
2755 	b += addr[4];
2756 	a += addr[3] << 24;
2757 	a += addr[2] << 16;
2758 	a += addr[1] << 8;
2759 	a += addr[0];
2760 
2761 	mix(a, b, c);
2762 
2763 	return (c & BRIDGE_RTHASH_MASK);
2764 }
2765 
2766 #undef mix
2767 
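/*
 * bridge_rtnode_addr_cmp:
 *
 *	Compare two Ethernet addresses byte by byte, returning <0, 0 or >0
 *	as 'a' sorts before, equal to or after 'b'.  The hash chains are
 *	kept sorted on this ordering.
 */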
2768 static int
2769 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
2770 {
2771 	int i, d;
2772 
2773 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
2774 		d = ((int)a[i]) - ((int)b[i]);
2775 	}
2776 
2777 	return (d);
2778 }
2779 
2780 /*
2781  * bridge_rtnode_lookup:
2782  *
2783  *	Look up a bridge route node for the specified destination. Compare the
2784  *	vlan id, or if it is zero just return the first match.
2785  */
2786 static struct bridge_rtnode *
2787 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2788 {
2789 	struct bridge_rtnode *brt;
2790 	uint32_t hash;
2791 	int dir;
2792 
2793 	BRIDGE_LOCK_ASSERT(sc);
2794 
2795 	hash = bridge_rthash(sc, addr);
2796 	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
2797 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
2798 		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
2799 			return (brt);
2800 		if (dir > 0)
2801 			return (NULL);
2802 	}
2803 
2804 	return (NULL);
2805 }
2806 
2807 /*
2808  * bridge_rtnode_insert:
2809  *
2810  *	Insert the specified bridge node into the route table.  We
2811  *	assume the entry is not already in the table.
2812  */
2813 static int
2814 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2815 {
2816 	struct bridge_rtnode *lbrt;
2817 	uint32_t hash;
2818 	int dir;
2819 
2820 	BRIDGE_LOCK_ASSERT(sc);
2821 
2822 	hash = bridge_rthash(sc, brt->brt_addr);
2823 
2824 	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2825 	if (lbrt == NULL) {
2826 		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2827 		goto out;
2828 	}
2829 
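	/*
	 * The hash chain is kept sorted by address; walk it to find either
	 * an existing duplicate or the insertion point.
	 */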
2830 	do {
2831 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
2832 		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
2833 			return (EEXIST);
2834 		if (dir > 0) {
2835 			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2836 			goto out;
2837 		}
2838 		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2839 			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2840 			goto out;
2841 		}
2842 		lbrt = LIST_NEXT(lbrt, brt_hash);
2843 	} while (lbrt != NULL);
2844 
2845 #ifdef DIAGNOSTIC
2846 	panic("bridge_rtnode_insert: impossible");
2847 #endif
2848 
2849 out:
2850 	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2851 	sc->sc_brtcnt++;
2852 
2853 	return (0);
2854 }
2855 
2856 /*
2857  * bridge_rtnode_destroy:
2858  *
2859  *	Destroy a bridge rtnode.
2860  */
2861 static void
2862 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
2863 {
2864 	BRIDGE_LOCK_ASSERT(sc);
2865 
2866 	LIST_REMOVE(brt, brt_hash);
2867 
2868 	LIST_REMOVE(brt, brt_list);
2869 	sc->sc_brtcnt--;
2870 	brt->brt_dst->bif_addrcnt--;
2871 	uma_zfree(bridge_rtnode_zone, brt);
2872 }
2873 
2874 /*
2875  * bridge_rtable_expire:
2876  *
2877  *	Set the expiry time for all routes on an interface.
2878  */
2879 static void
2880 bridge_rtable_expire(struct ifnet *ifp, int age)
2881 {
2882 	struct bridge_softc *sc = ifp->if_bridge;
2883 	struct bridge_rtnode *brt;
2884 
2885 	BRIDGE_LOCK(sc);
2886 
2887 	/*
2888 	 * If the age is zero then flush; otherwise cap the expiry times at
2889 	 * 'age' seconds from now for routes on this interface.
2890 	 */
2891 	if (age == 0)
2892 		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
2893 	else {
2894 		LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
2895 			/* Cap the expiry time to 'age' */
2896 			if (brt->brt_ifp == ifp &&
2897 			    brt->brt_expire > time_uptime + age &&
2898 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2899 				brt->brt_expire = time_uptime + age;
2900 		}
2901 	}
2902 	BRIDGE_UNLOCK(sc);
2903 }
2904 
2905 /*
2906  * bridge_state_change:
2907  *
2908  *	Callback from the bridgestp code when a port changes states.
2909  */
2910 static void
2911 bridge_state_change(struct ifnet *ifp, int state)
2912 {
2913 	struct bridge_softc *sc = ifp->if_bridge;
2914 	static const char *stpstates[] = {
2915 		"disabled",
2916 		"listening",
2917 		"learning",
2918 		"forwarding",
2919 		"blocking",
2920 		"discarding"
2921 	};
2922 
2923 	if (log_stp)
2924 		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
2925 		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
2926 }
2927 
2928 /*
2929  * Send bridge packets through pfil if they are one of the types pfil can deal
2930  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
2931  * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
2932  * question.) If bifp or ifp is NULL then packet filtering is skipped for
2933  */
2934 static int
2935 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
2936 {
2937 	int snap, error, i, hlen;
2938 	struct ether_header *eh1, eh2;
2939 	struct ip_fw_args args;
2940 	struct ip *ip;
2941 	struct llc llc1;
2942 	u_int16_t ether_type;
2943 
2944 	snap = 0;
2945 	error = -1;	/* Default error, unless set to 0 below */
2946 
2947 #if 0
2948 	/* we may return with the IP fields swapped, ensure it's not shared */
2949 	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
2950 #endif
2951 
2952 	if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
2953 		return (0); /* filtering is disabled */
2954 
2955 	i = min((*mp)->m_pkthdr.len, max_protohdr);
2956 	if ((*mp)->m_len < i) {
2957 	    *mp = m_pullup(*mp, i);
2958 	    if (*mp == NULL) {
2959 		printf("%s: m_pullup failed\n", __func__);
2960 		return (-1);
2961 	    }
2962 	}
2963 
2964 	eh1 = mtod(*mp, struct ether_header *);
2965 	ether_type = ntohs(eh1->ether_type);
2966 
2967 	/*
2968 	 * Check for SNAP/LLC.
2969 	 */
2970 	if (ether_type < ETHERMTU) {
2971 		struct llc *llc2 = (struct llc *)(eh1 + 1);
2972 
2973 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2974 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
2975 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
2976 		    llc2->llc_control == LLC_UI) {
2977 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
2978 			snap = 1;
2979 		}
2980 	}
2981 
2982 	/*
2983 	 * If we're trying to filter bridge traffic, don't look at anything
2984 	 * other than IP and ARP traffic.  If the filter doesn't understand
2985 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
2986 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2987 	 * but of course we don't have an AppleTalk filter to begin with.
2988 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
2989 	 * ARP traffic.)
2990 	 */
2991 	switch (ether_type) {
2992 		case ETHERTYPE_ARP:
2993 		case ETHERTYPE_REVARP:
2994 			if (pfil_ipfw_arp == 0)
2995 				return (0); /* Automatically pass */
2996 			break;
2997 
2998 		case ETHERTYPE_IP:
2999 #ifdef INET6
3000 		case ETHERTYPE_IPV6:
3001 #endif /* INET6 */
3002 			break;
3003 		default:
3004 			/*
3005 			 * Check to see if the user wants to pass non-IP
3006 			 * packets; these are not checked by pfil(9) and would
3007 			 * pass unconditionally, so the default is to drop.
3008 			 */
3009 			if (pfil_onlyip)
3010 				goto bad;
3011 	}
3012 
3013 	/* Strip off the Ethernet header and keep a copy. */
3014 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3015 	m_adj(*mp, ETHER_HDR_LEN);
3016 
3017 	/* Strip off snap header, if present */
3018 	if (snap) {
3019 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3020 		m_adj(*mp, sizeof(struct llc));
3021 	}
3022 
3023 	/*
3024 	 * Check the IP header for alignment and errors
3025 	 */
3026 	if (dir == PFIL_IN) {
3027 		switch (ether_type) {
3028 			case ETHERTYPE_IP:
3029 				error = bridge_ip_checkbasic(mp);
3030 				break;
3031 #ifdef INET6
3032 			case ETHERTYPE_IPV6:
3033 				error = bridge_ip6_checkbasic(mp);
3034 				break;
3035 #endif /* INET6 */
3036 			default:
3037 				error = 0;
3038 		}
3039 		if (error)
3040 			goto bad;
3041 	}
3042 
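	/*
	 * Run outbound traffic through layer2 ipfw.  A dummynet verdict
	 * re-attaches the Ethernet header and hands the packet to dummynet,
	 * which later returns it to us via bridge_dummynet().
	 */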
3043 	if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
3044 		INIT_VNET_IPFW(curvnet);
3045 
3046 		error = -1;
3047 		args.rule = ip_dn_claim_rule(*mp);
3048 		if (args.rule != NULL && V_fw_one_pass)
3049 			goto ipfwpass; /* packet already partially processed */
3050 
3051 		args.m = *mp;
3052 		args.oif = ifp;
3053 		args.next_hop = NULL;
3054 		args.eh = &eh2;
3055 		args.inp = NULL;	/* used by ipfw uid/gid/jail rules */
3056 		i = ip_fw_chk_ptr(&args);
3057 		*mp = args.m;
3058 
3059 		if (*mp == NULL)
3060 			return (error);
3061 
3062 		if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
3063 
3064 			/* put the Ethernet header back on */
3065 			M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
3066 			if (*mp == NULL)
3067 				return (error);
3068 			bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3069 
3070 			/*
3071 			 * Pass the pkt to dummynet, which consumes it. The
3072 			 * packet will return to us via bridge_dummynet().
3073 			 */
3074 			args.oif = ifp;
3075 			ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args);
3076 			return (error);
3077 		}
3078 
3079 		if (i != IP_FW_PASS) /* drop */
3080 			goto bad;
3081 	}
3082 
3083 ipfwpass:
3084 	error = 0;
3085 
3086 	/*
3087 	 * Run the packet through pfil
3088 	 */
3089 	switch (ether_type) {
3090 	case ETHERTYPE_IP:
3091 		/*
3092 		 * Before calling the firewall, swap fields the same as
3093 		 * IP does.  Here we assume the header is contiguous.
3094 		 */
3095 		ip = mtod(*mp, struct ip *);
3096 
3097 		ip->ip_len = ntohs(ip->ip_len);
3098 		ip->ip_off = ntohs(ip->ip_off);
3099 
3100 		/*
3101 		 * Run pfil on the member interface and the bridge, both can
3102 		 * be skipped by clearing pfil_member or pfil_bridge.
3103 		 *
3104 		 * Keep the order:
3105 		 *   in_if -> bridge_if -> out_if
3106 		 */
3107 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3108 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
3109 					dir, NULL);
3110 
3111 		if (*mp == NULL || error != 0) /* filter may consume */
3112 			break;
3113 
3114 		if (pfil_member && ifp != NULL)
3115 			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
3116 					dir, NULL);
3117 
3118 		if (*mp == NULL || error != 0) /* filter may consume */
3119 			break;
3120 
3121 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3122 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
3123 					dir, NULL);
3124 
3125 		if (*mp == NULL || error != 0) /* filter may consume */
3126 			break;
3127 
3128 		/* check if we need to fragment the packet */
3129 		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
3130 			i = (*mp)->m_pkthdr.len;
3131 			if (i > ifp->if_mtu) {
3132 				error = bridge_fragment(ifp, *mp, &eh2, snap,
3133 					    &llc1);
3134 				return (error);
3135 			}
3136 		}
3137 
3138 		/* Recalculate the ip checksum and restore byte ordering */
3139 		ip = mtod(*mp, struct ip *);
3140 		hlen = ip->ip_hl << 2;
3141 		if (hlen < sizeof(struct ip))
3142 			goto bad;
3143 		if (hlen > (*mp)->m_len) {
3144 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3145 				goto bad;
3146 			ip = mtod(*mp, struct ip *);
3147 			if (ip == NULL)
3148 				goto bad;
3149 		}
3150 		ip->ip_len = htons(ip->ip_len);
3151 		ip->ip_off = htons(ip->ip_off);
3152 		ip->ip_sum = 0;
3153 		if (hlen == sizeof(struct ip))
3154 			ip->ip_sum = in_cksum_hdr(ip);
3155 		else
3156 			ip->ip_sum = in_cksum(*mp, hlen);
3157 
3158 		break;
3159 #ifdef INET6
3160 	case ETHERTYPE_IPV6:
3161 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
3162 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3163 					dir, NULL);
3164 
3165 		if (*mp == NULL || error != 0) /* filter may consume */
3166 			break;
3167 
3168 		if (pfil_member && ifp != NULL)
3169 			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
3170 					dir, NULL);
3171 
3172 		if (*mp == NULL || error != 0) /* filter may consume */
3173 			break;
3174 
3175 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
3176 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
3177 					dir, NULL);
3178 		break;
3179 #endif
3180 	default:
3181 		error = 0;
3182 		break;
3183 	}
3184 
3185 	if (*mp == NULL)
3186 		return (error);
3187 	if (error != 0)
3188 		goto bad;
3189 
3190 	error = -1;
3191 
3192 	/*
3193 	 * Finally, put everything back the way it was and return
3194 	 */
3195 	if (snap) {
3196 		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
3197 		if (*mp == NULL)
3198 			return (error);
3199 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3200 	}
3201 
3202 	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
3203 	if (*mp == NULL)
3204 		return (error);
3205 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3206 
3207 	return (0);
3208 
3209 bad:
3210 	m_freem(*mp);
3211 	*mp = NULL;
3212 	return (error);
3213 }
3214 
3215 /*
3216  * Perform basic checks on header size since
3217  * pfil assumes ip_input has already processed
3218  * the packet for it.  Cut-and-pasted from ip_input.c.
3219  * Given how simple the IPv6 version is,
3220  * does the IPv4 version really need to be
3221  * this complicated?
3222  *
3223  * XXX Should we update ipstat here, or not?
3224  * XXX Right now we update ipstat but not
3225  * XXX csum_counter.
3226  */
3227 static int
3228 bridge_ip_checkbasic(struct mbuf **mp)
3229 {
3230 	INIT_VNET_INET(curvnet);
3231 	struct mbuf *m = *mp;
3232 	struct ip *ip;
3233 	int len, hlen;
3234 	u_short sum;
3235 
3236 	if (*mp == NULL)
3237 		return (-1);
3238 
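	/*
	 * If the IP header is not aligned, copy it up into a new mbuf with
	 * space for link headers; otherwise make sure the base IP header is
	 * contiguous in the first mbuf of the chain.
	 */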
3239 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3240 		if ((m = m_copyup(m, sizeof(struct ip),
3241 			(max_linkhdr + 3) & ~3)) == NULL) {
3242 			/* XXXJRT new stat, please */
3243 			V_ipstat.ips_toosmall++;
3244 			goto bad;
3245 		}
3246 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3247 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3248 			V_ipstat.ips_toosmall++;
3249 			goto bad;
3250 		}
3251 	}
3252 	ip = mtod(m, struct ip *);
3253 	if (ip == NULL) goto bad;
3254 
3255 	if (ip->ip_v != IPVERSION) {
3256 		V_ipstat.ips_badvers++;
3257 		goto bad;
3258 	}
3259 	hlen = ip->ip_hl << 2;
3260 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3261 		V_ipstat.ips_badhlen++;
3262 		goto bad;
3263 	}
3264 	if (hlen > m->m_len) {
3265 		if ((m = m_pullup(m, hlen)) == NULL) {
3266 			V_ipstat.ips_badhlen++;
3267 			goto bad;
3268 		}
3269 		ip = mtod(m, struct ip *);
3270 		if (ip == NULL) goto bad;
3271 	}
3272 
3273 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3274 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3275 	} else {
3276 		if (hlen == sizeof(struct ip)) {
3277 			sum = in_cksum_hdr(ip);
3278 		} else {
3279 			sum = in_cksum(m, hlen);
3280 		}
3281 	}
3282 	if (sum) {
3283 		V_ipstat.ips_badsum++;
3284 		goto bad;
3285 	}
3286 
3287 	/* Retrieve the packet length. */
3288 	len = ntohs(ip->ip_len);
3289 
3290 	/*
3291 	 * Check for additional length bogosity
3292 	 */
3293 	if (len < hlen) {
3294 		V_ipstat.ips_badlen++;
3295 		goto bad;
3296 	}
3297 
3298 	/*
3299 	 * Check that the amount of data in the buffers
3300 	 * is at least as much as the IP header would have us expect.
3301 	 * Drop packet if shorter than we expect.
3302 	 */
3303 	if (m->m_pkthdr.len < len) {
3304 		V_ipstat.ips_tooshort++;
3305 		goto bad;
3306 	}
3307 
3308 	/* Checks out, proceed */
3309 	*mp = m;
3310 	return (0);
3311 
3312 bad:
3313 	*mp = m;
3314 	return (-1);
3315 }
3316 
3317 #ifdef INET6
3318 /*
3319  * Same as above, but for IPv6.
3320  * Cut-and-pasted from ip6_input.c.
3321  * XXX Should we update ip6stat, or not?
3322  */
3323 static int
3324 bridge_ip6_checkbasic(struct mbuf **mp)
3325 {
3326 	INIT_VNET_INET6(curvnet);
3327 	struct mbuf *m = *mp;
3328 	struct ip6_hdr *ip6;
3329 
3330 	/*
3331 	 * If the IPv6 header is not aligned, slurp it up into a new
3332 	 * mbuf with space for link headers, in the event we forward
3333 	 * it.  Otherwise, if it is aligned, make sure the entire base
3334 	 * IPv6 header is in the first mbuf of the chain.
3335 	 */
3336 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3337 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3338 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3339 			    (max_linkhdr + 3) & ~3)) == NULL) {
3340 			/* XXXJRT new stat, please */
3341 			V_ip6stat.ip6s_toosmall++;
3342 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3343 			goto bad;
3344 		}
3345 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3346 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3347 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3348 			V_ip6stat.ip6s_toosmall++;
3349 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3350 			goto bad;
3351 		}
3352 	}
3353 
3354 	ip6 = mtod(m, struct ip6_hdr *);
3355 
3356 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3357 		V_ip6stat.ip6s_badvers++;
3358 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3359 		goto bad;
3360 	}
3361 
3362 	/* Checks out, proceed */
3363 	*mp = m;
3364 	return (0);
3365 
3366 bad:
3367 	*mp = m;
3368 	return (-1);
3369 }
3370 #endif /* INET6 */
3371 
3372 /*
3373  * bridge_fragment:
3374  *
3375  *	Return a fragmented mbuf chain.
3376  */
3377 static int
3378 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3379     int snap, struct llc *llc)
3380 {
3381 	INIT_VNET_INET(curvnet);
3382 	struct mbuf *m0;
3383 	struct ip *ip;
3384 	int error = -1;
3385 
3386 	if (m->m_len < sizeof(struct ip) &&
3387 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3388 		goto out;
3389 	ip = mtod(m, struct ip *);
3390 
3391 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3392 		    CSUM_DELAY_IP);
3393 	if (error)
3394 		goto out;
3395 
3396 	/* walk the chain and re-add the Ethernet header */
3397 	for (m0 = m; m0; m0 = m0->m_nextpkt) {
3398 		if (error == 0) {
3399 			if (snap) {
3400 				M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT);
3401 				if (m0 == NULL) {
3402 					error = ENOBUFS;
3403 					continue;
3404 				}
3405 				bcopy(llc, mtod(m0, caddr_t),
3406 				    sizeof(struct llc));
3407 			}
3408 			M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT);
3409 			if (m0 == NULL) {
3410 				error = ENOBUFS;
3411 				continue;
3412 			}
3413 			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3414 		} else
3415 			m_freem(m);
3416 	}
3417 
3418 	if (error == 0)
3419 		V_ipstat.ips_fragmented++;
3420 
3421 	return (error);
3422 
3423 out:
3424 	if (m != NULL)
3425 		m_freem(m);
3426 	return (error);
3427 }
3428