1 /*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2 
3 /*
4  * Copyright 2001 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Jason L. Wright
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
58  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
59  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
60  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
61  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
62  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
64  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
65  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
66  * POSSIBILITY OF SUCH DAMAGE.
67  *
68  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
69  */
70 
71 /*
72  * Network interface bridge support.
73  *
74  * TODO:
75  *
76  *	- Currently only supports Ethernet-like interfaces (Ethernet,
77  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
78  *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
79  *	  consider heterogeneous bridges).
80  */
81 
82 #include <sys/cdefs.h>
83 __FBSDID("$FreeBSD$");
84 
85 #include "opt_inet.h"
86 #include "opt_inet6.h"
87 #include "opt_carp.h"
88 
89 #include <sys/param.h>
90 #include <sys/mbuf.h>
91 #include <sys/malloc.h>
92 #include <sys/protosw.h>
93 #include <sys/systm.h>
94 #include <sys/time.h>
95 #include <sys/socket.h> /* for net/if.h */
96 #include <sys/sockio.h>
97 #include <sys/ctype.h>  /* string functions */
98 #include <sys/kernel.h>
99 #include <sys/random.h>
100 #include <sys/syslog.h>
101 #include <sys/sysctl.h>
102 #include <vm/uma.h>
103 #include <sys/module.h>
104 #include <sys/proc.h>
105 #include <sys/lock.h>
106 #include <sys/mutex.h>
107 
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_clone.h>
111 #include <net/if_dl.h>
112 #include <net/if_types.h>
113 #include <net/if_var.h>
114 #include <net/pfil.h>
115 
116 #include <netinet/in.h> /* for struct arpcom */
117 #include <netinet/in_systm.h>
118 #include <netinet/in_var.h>
119 #include <netinet/ip.h>
120 #include <netinet/ip_var.h>
121 #ifdef INET6
122 #include <netinet/ip6.h>
123 #include <netinet6/ip6_var.h>
124 #endif
125 #ifdef DEV_CARP
126 #include <netinet/ip_carp.h>
127 #endif
128 #include <machine/in_cksum.h>
129 #include <netinet/if_ether.h> /* for struct arpcom */
130 #include <net/bridgestp.h>
131 #include <net/if_bridgevar.h>
132 #include <net/if_llc.h>
133 
134 #include <net/route.h>
135 #include <netinet/ip_fw.h>
136 #include <netinet/ip_dummynet.h>
137 
138 /*
139  * Size of the route hash table.  Must be a power of two.
140  */
141 #ifndef BRIDGE_RTHASH_SIZE
142 #define	BRIDGE_RTHASH_SIZE		1024
143 #endif
144 
145 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
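/*
 * Because the table size is a power of two, a bucket index can be taken
 * with a simple mask instead of a modulo, e.g.:
 *
 *	bucket = hash & BRIDGE_RTHASH_MASK;
 */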
146 
147 /*
148  * Maximum number of addresses to cache.
149  */
150 #ifndef BRIDGE_RTABLE_MAX
151 #define	BRIDGE_RTABLE_MAX		100
152 #endif
153 
154 /*
155  * Timeout (in seconds) for entries learned dynamically.
156  */
157 #ifndef BRIDGE_RTABLE_TIMEOUT
158 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
159 #endif
160 
161 /*
162  * Number of seconds between walks of the route list.
163  */
164 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
165 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
166 #endif
167 
168 /*
169  * List of capabilities to mask on the member interface.
170  */
171 #define	BRIDGE_IFCAPS_MASK		IFCAP_TXCSUM
172 
173 /*
174  * Bridge interface list entry.
175  */
176 struct bridge_iflist {
177 	LIST_ENTRY(bridge_iflist) bif_next;
178 	struct ifnet		*bif_ifp;	/* member if */
179 	struct bstp_port	bif_stp;	/* STP state */
180 	uint32_t		bif_flags;	/* member if flags */
181 	int			bif_mutecap;	/* member muted caps */
182 };
183 
184 /*
185  * Bridge route node.
186  */
187 struct bridge_rtnode {
188 	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
189 	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
190 	struct ifnet		*brt_ifp;	/* destination if */
191 	unsigned long		brt_expire;	/* expiration time */
192 	uint8_t			brt_flags;	/* address flags */
193 	uint8_t			brt_addr[ETHER_ADDR_LEN];
194 };
195 
196 /*
197  * Software state for each bridge.
198  */
199 struct bridge_softc {
200 	struct ifnet		*sc_ifp;	/* make this an interface */
201 	LIST_ENTRY(bridge_softc) sc_list;
202 	struct mtx		sc_mtx;
203 	struct cv		sc_cv;
204 	uint32_t		sc_brtmax;	/* max # of addresses */
205 	uint32_t		sc_brtcnt;	/* cur. # of addresses */
206 	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
207 	struct callout		sc_brcallout;	/* bridge callout */
208 	uint32_t		sc_iflist_ref;	/* refcount for sc_iflist */
209 	uint32_t		sc_iflist_xcnt;	/* exclusive access count */
210 	LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
211 	LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
212 	LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
213 	uint32_t		sc_rthash_key;	/* key for hash */
214 	LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
215 	struct bstp_state	sc_stp;		/* STP state */
216 	uint32_t		sc_brtexceeded;	/* # of cache drops */
217 };
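/*
 * Locking sketch: sc_mtx protects the softc and is taken through the
 * BRIDGE_LOCK()/BRIDGE_UNLOCK() macros used below (see if_bridgevar.h).
 * BRIDGE_LOCK2REF()/BRIDGE_UNREF() trade the mutex for a reference
 * (sc_iflist_ref) so that sc_iflist can be walked unlocked, while
 * BRIDGE_XLOCK()/BRIDGE_XDROP() use sc_iflist_xcnt and sc_cv to wait
 * for those references to drain before a member is removed.
 */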
218 
219 static struct mtx 	bridge_list_mtx;
220 eventhandler_tag	bridge_detach_cookie = NULL;
221 
222 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
223 
224 uma_zone_t bridge_rtnode_zone;
225 
226 static int	bridge_clone_create(struct if_clone *, int, caddr_t);
227 static void	bridge_clone_destroy(struct ifnet *);
228 
229 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
230 static void	bridge_mutecaps(struct bridge_iflist *, int);
231 static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
232 static void	bridge_init(void *);
233 static void	bridge_dummynet(struct mbuf *, struct ifnet *);
234 static void	bridge_stop(struct ifnet *, int);
235 static void	bridge_start(struct ifnet *);
236 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
237 static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
238 		    struct rtentry *);
239 static void	bridge_enqueue(struct bridge_softc *, struct ifnet *,
240 		    struct mbuf *);
241 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
242 
243 static void	bridge_forward(struct bridge_softc *, struct mbuf *m);
244 
245 static void	bridge_timer(void *);
246 
247 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
248 		    struct mbuf *, int);
249 static void	bridge_span(struct bridge_softc *, struct mbuf *);
250 
251 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
252 		    struct ifnet *, int, uint8_t);
253 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *);
254 static void	bridge_rttrim(struct bridge_softc *);
255 static void	bridge_rtage(struct bridge_softc *);
256 static void	bridge_rtflush(struct bridge_softc *, int);
257 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *);
258 
259 static int	bridge_rtable_init(struct bridge_softc *);
260 static void	bridge_rtable_fini(struct bridge_softc *);
261 
262 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
263 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
264 		    const uint8_t *);
265 static int	bridge_rtnode_insert(struct bridge_softc *,
266 		    struct bridge_rtnode *);
267 static void	bridge_rtnode_destroy(struct bridge_softc *,
268 		    struct bridge_rtnode *);
269 static void	bridge_state_change(struct ifnet *, int);
270 
271 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
272 		    const char *name);
273 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
274 		    struct ifnet *ifp);
275 static void	bridge_delete_member(struct bridge_softc *,
276 		    struct bridge_iflist *, int);
277 static void	bridge_delete_span(struct bridge_softc *,
278 		    struct bridge_iflist *);
279 
280 static int	bridge_ioctl_add(struct bridge_softc *, void *);
281 static int	bridge_ioctl_del(struct bridge_softc *, void *);
282 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
283 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
284 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
285 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
286 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
287 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
288 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
289 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
290 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
291 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
292 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
293 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
294 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
295 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
296 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
297 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
298 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
299 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
300 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
301 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
302 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
303 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
304 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
305 static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
306 static int	bridge_ioctl_grte(struct bridge_softc *, void *);
307 static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
308 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
309 		    int);
310 static int	bridge_ip_checkbasic(struct mbuf **mp);
311 #ifdef INET6
312 static int	bridge_ip6_checkbasic(struct mbuf **mp);
313 #endif /* INET6 */
314 static int	bridge_fragment(struct ifnet *, struct mbuf *,
315 		    struct ether_header *, int, struct llc *);
316 
317 SYSCTL_DECL(_net_link);
318 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");
319 
320 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
321 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
322 static int pfil_member = 1; /* run pfil hooks on the member interface */
323 static int pfil_ipfw = 0;   /* layer2 filter with ipfw */
324 static int pfil_ipfw_arp = 0;   /* layer2 ARP filter with ipfw */
325 static int log_stp   = 0;   /* log STP state changes */
326 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
327     &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
328 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW,
329     &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
330 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
331     &pfil_bridge, 0, "Packet filter on the bridge interface");
332 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
333     &pfil_member, 0, "Packet filter on the member interface");
334 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
335     &log_stp, 0, "Log STP state changes");
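/*
 * These knobs live under the net.link.bridge sysctl tree; for example
 * "sysctl net.link.bridge.pfil_member=0" disables filtering on the
 * member interfaces.
 */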
336 
337 struct bridge_control {
338 	int	(*bc_func)(struct bridge_softc *, void *);
339 	int	bc_argsize;
340 	int	bc_flags;
341 };
342 
343 #define	BC_F_COPYIN		0x01	/* copy arguments in */
344 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
345 #define	BC_F_SUSER		0x04	/* do super-user check */
346 
347 const struct bridge_control bridge_control_table[] = {
348 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
349 	  BC_F_COPYIN|BC_F_SUSER },
350 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
351 	  BC_F_COPYIN|BC_F_SUSER },
352 
353 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
354 	  BC_F_COPYIN|BC_F_COPYOUT },
355 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
356 	  BC_F_COPYIN|BC_F_SUSER },
357 
358 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
359 	  BC_F_COPYIN|BC_F_SUSER },
360 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
361 	  BC_F_COPYOUT },
362 
363 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
364 	  BC_F_COPYIN|BC_F_COPYOUT },
365 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
366 	  BC_F_COPYIN|BC_F_COPYOUT },
367 
368 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
369 	  BC_F_COPYIN|BC_F_SUSER },
370 
371 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
372 	  BC_F_COPYIN|BC_F_SUSER },
373 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
374 	  BC_F_COPYOUT },
375 
376 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
377 	  BC_F_COPYIN|BC_F_SUSER },
378 
379 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
380 	  BC_F_COPYIN|BC_F_SUSER },
381 
382 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
383 	  BC_F_COPYOUT },
384 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
385 	  BC_F_COPYIN|BC_F_SUSER },
386 
387 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
388 	  BC_F_COPYOUT },
389 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
390 	  BC_F_COPYIN|BC_F_SUSER },
391 
392 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
393 	  BC_F_COPYOUT },
394 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
395 	  BC_F_COPYIN|BC_F_SUSER },
396 
397 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
398 	  BC_F_COPYOUT },
399 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
400 	  BC_F_COPYIN|BC_F_SUSER },
401 
402 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
403 	  BC_F_COPYIN|BC_F_SUSER },
404 
405 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
406 	  BC_F_COPYIN|BC_F_SUSER },
407 
408 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
409 	  BC_F_COPYIN|BC_F_SUSER },
410 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
411 	  BC_F_COPYIN|BC_F_SUSER },
412 
413 	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
414 	  BC_F_COPYOUT },
415 
416 	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
417 	  BC_F_COPYOUT },
418 
419 	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
420 	  BC_F_COPYOUT },
421 };
422 const int bridge_control_table_size =
423     sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
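/*
 * The table is indexed by the ifd_cmd field of a SIOCGDRVSPEC or
 * SIOCSDRVSPEC request.  As a rough illustration (not code from this
 * file), a userland program adding a member interface would do
 * something like:
 *
 *	struct ifdrv ifd;
 *	struct ifbreq req;
 *
 *	memset(&ifd, 0, sizeof(ifd));
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);	(s is an open datagram socket)
 *
 * bridge_ioctl() below validates ifd_cmd and ifd_len and dispatches to
 * the matching bc_func with the flags requested here.
 */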
424 
425 static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
426 			{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
427 
428 LIST_HEAD(, bridge_softc) bridge_list;
429 
430 IFC_SIMPLE_DECLARE(bridge, 0);
431 
432 static int
433 bridge_modevent(module_t mod, int type, void *data)
434 {
435 
436 	switch (type) {
437 	case MOD_LOAD:
438 		mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF);
439 		if_clone_attach(&bridge_cloner);
440 		bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
441 		    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
442 		    UMA_ALIGN_PTR, 0);
443 		LIST_INIT(&bridge_list);
444 		bridge_input_p = bridge_input;
445 		bridge_output_p = bridge_output;
446 		bridge_dn_p = bridge_dummynet;
447 		bstp_linkstate_p = bstp_linkstate;
448 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
449 		    ifnet_departure_event, bridge_ifdetach, NULL,
450 		    EVENTHANDLER_PRI_ANY);
451 		break;
452 	case MOD_UNLOAD:
453 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
454 		    bridge_detach_cookie);
455 		if_clone_detach(&bridge_cloner);
456 		uma_zdestroy(bridge_rtnode_zone);
457 		bridge_input_p = NULL;
458 		bridge_output_p = NULL;
459 		bridge_dn_p = NULL;
460 		bstp_linkstate_p = NULL;
461 		mtx_destroy(&bridge_list_mtx);
462 		break;
463 	default:
464 		return (EOPNOTSUPP);
465 	}
466 	return (0);
467 }
468 
469 static moduledata_t bridge_mod = {
470 	"if_bridge",
471 	bridge_modevent,
472 	0
473 };
474 
475 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
476 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
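/*
 * The module may be compiled into the kernel ("device if_bridge" in the
 * kernel configuration) or loaded at run time with "kldload if_bridge".
 */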
477 
478 /*
479  * handler for net.link.bridge.pfil_ipfw
480  */
481 static int
482 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
483 {
484 	int enable = pfil_ipfw;
485 	int error;
486 
487 	error = sysctl_handle_int(oidp, &enable, 0, req);
488 	enable = (enable) ? 1 : 0;
489 
490 	if (enable != pfil_ipfw) {
491 		pfil_ipfw = enable;
492 
493 		/*
494 		 * Disable pfil so that ipfw doesn't run twice; if the user
495 		 * really wants both, they can re-enable pfil_bridge and/or
496 		 * pfil_member.  Also allow non-IP packets, as ipfw can filter
497 		 * by layer2 type.
498 		 */
499 		if (pfil_ipfw) {
500 			pfil_onlyip = 0;
501 			pfil_bridge = 0;
502 			pfil_member = 0;
503 		}
504 	}
505 
506 	return (error);
507 }
508 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
509 	    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
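/*
 * For example, "sysctl net.link.bridge.ipfw=1" enables layer2 ipfw
 * filtering and, via the handler above, also clears pfil_onlyip,
 * pfil_bridge and pfil_member.
 */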
510 
511 /*
512  * bridge_clone_create:
513  *
514  *	Create a new bridge instance.
515  */
516 static int
517 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
518 {
519 	struct bridge_softc *sc, *sc2;
520 	struct ifnet *bifp, *ifp;
521 	u_char eaddr[6];
522 	int retry;
523 
524 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
525 	BRIDGE_LOCK_INIT(sc);
526 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
527 	if (ifp == NULL) {
528 		free(sc, M_DEVBUF);
529 		return (ENOSPC);
530 	}
531 
532 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
533 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
534 	getmicrotime(&(sc->sc_stp.bs_last_tc_time));
535 
536 	/* Initialize our routing table. */
537 	bridge_rtable_init(sc);
538 
539 	callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);
540 
541 	LIST_INIT(&sc->sc_iflist);
542 	LIST_INIT(&sc->sc_spanlist);
543 
544 	ifp->if_softc = sc;
545 	if_initname(ifp, ifc->ifc_name, unit);
546 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
547 	ifp->if_ioctl = bridge_ioctl;
548 	ifp->if_start = bridge_start;
549 	ifp->if_init = bridge_init;
550 	ifp->if_type = IFT_BRIDGE;
551 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
552 	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
553 	IFQ_SET_READY(&ifp->if_snd);
554 
555 	/*
556 	 * Generate a random ethernet address with a locally administered
557 	 * Generate a random Ethernet address and mark it as a locally
558 	 * administered (unicast) address.
559 	 * Since we are using random ethernet addresses for the bridge, it is
560 	 * possible that we might have address collisions, so make sure that
561 	 * this hardware address isn't already in use on another bridge.
562 	 */
563 	for (retry = 1; retry != 0;) {
564 		arc4rand(eaddr, ETHER_ADDR_LEN, 1);
565 		eaddr[0] &= ~1;		/* clear multicast bit */
566 		eaddr[0] |= 2;		/* set the LAA bit */
567 		retry = 0;
568 		mtx_lock(&bridge_list_mtx);
569 		LIST_FOREACH(sc2, &bridge_list, sc_list) {
570 			bifp = sc2->sc_ifp;
571 			if (memcmp(eaddr, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0)
572 				retry = 1;
573 		}
574 		mtx_unlock(&bridge_list_mtx);
575 	}
576 
577 	bstp_attach(&sc->sc_stp, bridge_state_change);
578 	ether_ifattach(ifp, eaddr);
579 	/* Now undo some of the damage... */
580 	ifp->if_baudrate = 0;
581 	ifp->if_type = IFT_BRIDGE;
582 
583 	mtx_lock(&bridge_list_mtx);
584 	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
585 	mtx_unlock(&bridge_list_mtx);
586 
587 	return (0);
588 }
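/*
 * bridge_clone_create() is normally reached through the cloner, e.g.
 * from userland:
 *
 *	ifconfig bridge0 create
 *	ifconfig bridge0 addm em0 addm em1 up
 *
 * The addm and up steps are then handled by bridge_ioctl_add() and
 * bridge_ioctl()/bridge_init() below.
 */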
589 
590 /*
591  * bridge_clone_destroy:
592  *
593  *	Destroy a bridge instance.
594  */
595 static void
596 bridge_clone_destroy(struct ifnet *ifp)
597 {
598 	struct bridge_softc *sc = ifp->if_softc;
599 	struct bridge_iflist *bif;
600 
601 	BRIDGE_LOCK(sc);
602 
603 	bridge_stop(ifp, 1);
604 	ifp->if_flags &= ~IFF_UP;
605 
606 	while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
607 		bridge_delete_member(sc, bif, 0);
608 
609 	while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) {
610 		bridge_delete_span(sc, bif);
611 	}
612 
613 	BRIDGE_UNLOCK(sc);
614 
615 	callout_drain(&sc->sc_brcallout);
616 
617 	mtx_lock(&bridge_list_mtx);
618 	LIST_REMOVE(sc, sc_list);
619 	mtx_unlock(&bridge_list_mtx);
620 
621 	bstp_detach(&sc->sc_stp);
622 	ether_ifdetach(ifp);
623 	if_free_type(ifp, IFT_ETHER);
624 
625 	/* Tear down the routing table. */
626 	bridge_rtable_fini(sc);
627 
628 	BRIDGE_LOCK_DESTROY(sc);
629 	free(sc, M_DEVBUF);
630 }
631 
632 /*
633  * bridge_ioctl:
634  *
635  *	Handle a control request from the operator.
636  */
637 static int
638 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
639 {
640 	struct bridge_softc *sc = ifp->if_softc;
641 	struct thread *td = curthread;
642 	union {
643 		struct ifbreq ifbreq;
644 		struct ifbifconf ifbifconf;
645 		struct ifbareq ifbareq;
646 		struct ifbaconf ifbaconf;
647 		struct ifbrparam ifbrparam;
648 	} args;
649 	struct ifdrv *ifd = (struct ifdrv *) data;
650 	const struct bridge_control *bc;
651 	int error = 0;
652 
653 	BRIDGE_LOCK(sc);
654 
655 	switch (cmd) {
656 
657 	case SIOCADDMULTI:
658 	case SIOCDELMULTI:
659 		break;
660 
661 	case SIOCGDRVSPEC:
662 	case SIOCSDRVSPEC:
663 		if (ifd->ifd_cmd >= bridge_control_table_size) {
664 			error = EINVAL;
665 			break;
666 		}
667 		bc = &bridge_control_table[ifd->ifd_cmd];
668 
669 		if (cmd == SIOCGDRVSPEC &&
670 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
671 			error = EINVAL;
672 			break;
673 		}
674 		else if (cmd == SIOCSDRVSPEC &&
675 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
676 			error = EINVAL;
677 			break;
678 		}
679 
680 		if (bc->bc_flags & BC_F_SUSER) {
681 			error = suser(td);
682 			if (error)
683 				break;
684 		}
685 
686 		if (ifd->ifd_len != bc->bc_argsize ||
687 		    ifd->ifd_len > sizeof(args)) {
688 			error = EINVAL;
689 			break;
690 		}
691 
692 		bzero(&args, sizeof(args));
693 		if (bc->bc_flags & BC_F_COPYIN) {
694 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
695 			if (error)
696 				break;
697 		}
698 
699 		error = (*bc->bc_func)(sc, &args);
700 		if (error)
701 			break;
702 
703 		if (bc->bc_flags & BC_F_COPYOUT)
704 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
705 
706 		break;
707 
708 	case SIOCSIFFLAGS:
709 		if (!(ifp->if_flags & IFF_UP) &&
710 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
711 			/*
712 			 * If interface is marked down and it is running,
713 			 * then stop and disable it.
714 			 */
715 			bridge_stop(ifp, 1);
716 		} else if ((ifp->if_flags & IFF_UP) &&
717 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
718 			/*
719 			 * If interface is marked up and it is stopped, then
720 			 * start it.
721 			 */
722 			BRIDGE_UNLOCK(sc);
723 			(*ifp->if_init)(sc);
724 		}
725 		break;
726 
727 	case SIOCSIFMTU:
728 		/* Do not allow the MTU to be changed on the bridge */
729 		error = EINVAL;
730 		break;
731 
732 	default:
733 		/*
734 		 * drop the lock as ether_ioctl() will call bridge_start() and
735 		 * cause the lock to be recursed.
736 		 */
737 		BRIDGE_UNLOCK(sc);
738 		error = ether_ioctl(ifp, cmd, data);
739 		break;
740 	}
741 
742 	if (BRIDGE_LOCKED(sc))
743 		BRIDGE_UNLOCK(sc);
744 
745 	return (error);
746 }
747 
748 /*
749  * bridge_mutecaps:
750  *
751  *	Clear or restore unwanted capabilities on the member interface
752  */
753 static void
754 bridge_mutecaps(struct bridge_iflist *bif, int mute)
755 {
756 	struct ifnet *ifp = bif->bif_ifp;
757 	struct ifreq ifr;
758 	int error;
759 
760 	if (ifp->if_ioctl == NULL)
761 		return;
762 
763 	bzero(&ifr, sizeof(ifr));
764 	ifr.ifr_reqcap = ifp->if_capenable;
765 
766 	if (mute) {
767 		/* mask off and save capabilities */
768 		bif->bif_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
769 		if (bif->bif_mutecap != 0)
770 			ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
771 	} else
772 		/* restore muted capabilities */
773 		ifr.ifr_reqcap |= bif->bif_mutecap;
774 
775 
776 	if (bif->bif_mutecap != 0) {
777 		IFF_LOCKGIANT(ifp);
778 		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
779 		IFF_UNLOCKGIANT(ifp);
780 	}
781 }
782 
783 /*
784  * bridge_lookup_member:
785  *
786  *	Lookup a bridge member interface.
787  */
788 static struct bridge_iflist *
789 bridge_lookup_member(struct bridge_softc *sc, const char *name)
790 {
791 	struct bridge_iflist *bif;
792 	struct ifnet *ifp;
793 
794 	BRIDGE_LOCK_ASSERT(sc);
795 
796 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
797 		ifp = bif->bif_ifp;
798 		if (strcmp(ifp->if_xname, name) == 0)
799 			return (bif);
800 	}
801 
802 	return (NULL);
803 }
804 
805 /*
806  * bridge_lookup_member_if:
807  *
808  *	Lookup a bridge member interface by ifnet*.
809  */
810 static struct bridge_iflist *
811 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
812 {
813 	struct bridge_iflist *bif;
814 
815 	BRIDGE_LOCK_ASSERT(sc);
816 
817 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
818 		if (bif->bif_ifp == member_ifp)
819 			return (bif);
820 	}
821 
822 	return (NULL);
823 }
824 
825 /*
826  * bridge_delete_member:
827  *
828  *	Delete the specified member interface.
829  */
830 static void
831 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
832     int gone)
833 {
834 	struct ifnet *ifs = bif->bif_ifp;
835 
836 	BRIDGE_LOCK_ASSERT(sc);
837 
838 	if (!gone) {
839 		switch (ifs->if_type) {
840 		case IFT_ETHER:
841 		case IFT_L2VLAN:
842 			/*
843 			 * Take the interface out of promiscuous mode.
844 			 */
845 			(void) ifpromisc(ifs, 0);
846 			bridge_mutecaps(bif, 0);
847 			break;
848 
849 		case IFT_GIF:
850 			break;
851 
852 		default:
853 #ifdef DIAGNOSTIC
854 			panic("bridge_delete_member: impossible");
855 #endif
856 			break;
857 		}
858 	}
859 
860 	if (bif->bif_flags & IFBIF_STP)
861 		bstp_delete(&bif->bif_stp);
862 
863 	ifs->if_bridge = NULL;
864 	BRIDGE_XLOCK(sc);
865 	LIST_REMOVE(bif, bif_next);
866 	BRIDGE_XDROP(sc);
867 
868 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
869 
870 	BRIDGE_UNLOCK(sc);
871 	bstp_drain(&bif->bif_stp);	/* prepare to free */
872 	BRIDGE_LOCK(sc);
873 	free(bif, M_DEVBUF);
874 }
875 
876 /*
877  * bridge_delete_span:
878  *
879  *	Delete the specified span interface.
880  */
881 static void
882 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
883 {
884 	BRIDGE_LOCK_ASSERT(sc);
885 
886 	KASSERT(bif->bif_ifp->if_bridge == NULL,
887 	    ("%s: not a span interface", __func__));
888 
889 	LIST_REMOVE(bif, bif_next);
890 	free(bif, M_DEVBUF);
891 }
892 
893 static int
894 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
895 {
896 	struct ifbreq *req = arg;
897 	struct bridge_iflist *bif = NULL;
898 	struct ifnet *ifs;
899 	int error = 0;
900 
901 	ifs = ifunit(req->ifbr_ifsname);
902 	if (ifs == NULL)
903 		return (ENOENT);
904 
905 	/* If it's in the span list, it can't be a member. */
906 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
907 		if (ifs == bif->bif_ifp)
908 			return (EBUSY);
909 
910 	/* Allow the first Ethernet member to define the MTU */
911 	if (ifs->if_type != IFT_GIF) {
912 		if (LIST_EMPTY(&sc->sc_iflist))
913 			sc->sc_ifp->if_mtu = ifs->if_mtu;
914 		else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
915 			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
916 			    ifs->if_xname);
917 			return (EINVAL);
918 		}
919 	}
920 
921 	if (ifs->if_bridge == sc)
922 		return (EEXIST);
923 
924 	if (ifs->if_bridge != NULL)
925 		return (EBUSY);
926 
927 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
928 	if (bif == NULL)
929 		return (ENOMEM);
930 
931 	bif->bif_ifp = ifs;
932 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
933 
934 	switch (ifs->if_type) {
935 	case IFT_ETHER:
936 	case IFT_L2VLAN:
937 		/*
938 		 * Place the interface into promiscuous mode.
939 		 */
940 		error = ifpromisc(ifs, 1);
941 		if (error)
942 			goto out;
943 
944 		bridge_mutecaps(bif, 1);
945 		break;
946 
947 	case IFT_GIF:
948 		break;
949 
950 	default:
951 		error = EINVAL;
952 		goto out;
953 	}
954 
955 	ifs->if_bridge = sc;
956 	/*
957 	 * XXX: XLOCK HERE!?!
958 	 *
959 	 * NOTE: insert_***HEAD*** should be safe for the traversals.
960 	 */
961 	LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
962 
963 out:
964 	if (error) {
965 		if (bif != NULL)
966 			free(bif, M_DEVBUF);
967 	}
968 	return (error);
969 }
970 
971 static int
972 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
973 {
974 	struct ifbreq *req = arg;
975 	struct bridge_iflist *bif;
976 
977 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
978 	if (bif == NULL)
979 		return (ENOENT);
980 
981 	bridge_delete_member(sc, bif, 0);
982 
983 	return (0);
984 }
985 
986 static int
987 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
988 {
989 	struct ifbreq *req = arg;
990 	struct bridge_iflist *bif;
991 
992 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
993 	if (bif == NULL)
994 		return (ENOENT);
995 
996 	req->ifbr_ifsflags = bif->bif_flags;
997 	req->ifbr_state = bif->bif_stp.bp_state;
998 	req->ifbr_priority = bif->bif_stp.bp_priority;
999 	req->ifbr_path_cost = bif->bif_stp.bp_path_cost;
1000 	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;
1001 
1002 	return (0);
1003 }
1004 
1005 static int
1006 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1007 {
1008 	struct ifbreq *req = arg;
1009 	struct bridge_iflist *bif;
1010 	int error;
1011 
1012 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1013 	if (bif == NULL)
1014 		return (ENOENT);
1015 
1016 	if (req->ifbr_ifsflags & IFBIF_SPAN)
1017 		/* SPAN is readonly */
1018 		return (EINVAL);
1019 
1020 	if (req->ifbr_ifsflags & IFBIF_STP) {
1021 		if ((bif->bif_flags & IFBIF_STP) == 0) {
1022 			error = bstp_add(&sc->sc_stp, &bif->bif_stp,
1023 				    bif->bif_ifp);
1024 			if (error)
1025 				return (error);
1026 		}
1027 	} else {
1028 		if ((bif->bif_flags & IFBIF_STP) != 0)
1029 			bstp_delete(&bif->bif_stp);
1030 	}
1031 
1032 	bif->bif_flags = req->ifbr_ifsflags;
1033 
1034 	return (0);
1035 }
1036 
1037 static int
1038 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1039 {
1040 	struct ifbrparam *param = arg;
1041 
1042 	sc->sc_brtmax = param->ifbrp_csize;
1043 	bridge_rttrim(sc);
1044 
1045 	return (0);
1046 }
1047 
1048 static int
1049 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1050 {
1051 	struct ifbrparam *param = arg;
1052 
1053 	param->ifbrp_csize = sc->sc_brtmax;
1054 
1055 	return (0);
1056 }
1057 
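/*
 * bridge_ioctl_gifs reports both the member and the span interface
 * lists.  A caller that passes ifbic_len == 0 is only told how much
 * buffer space is required and can retry with a suitably sized buffer.
 */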
1058 static int
1059 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1060 {
1061 	struct ifbifconf *bifc = arg;
1062 	struct bridge_iflist *bif;
1063 	struct ifbreq breq;
1064 	int count, len, error = 0;
1065 
1066 	count = 0;
1067 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1068 		count++;
1069 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1070 		count++;
1071 
1072 	if (bifc->ifbic_len == 0) {
1073 		bifc->ifbic_len = sizeof(breq) * count;
1074 		return (0);
1075 	}
1076 
1077 	count = 0;
1078 	len = bifc->ifbic_len;
1079 	bzero(&breq, sizeof(breq));
1080 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1081 		if (len < sizeof(breq))
1082 			break;
1083 
1084 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1085 		    sizeof(breq.ifbr_ifsname));
1086 		breq.ifbr_ifsflags = bif->bif_flags;
1087 		breq.ifbr_state = bif->bif_stp.bp_state;
1088 		breq.ifbr_priority = bif->bif_stp.bp_priority;
1089 		breq.ifbr_path_cost = bif->bif_stp.bp_path_cost;
1090 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xff;
1091 		error = copyout(&breq, bifc->ifbic_req + count, sizeof(breq));
1092 		if (error)
1093 			break;
1094 		count++;
1095 		len -= sizeof(breq);
1096 	}
1097 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1098 		if (len < sizeof(breq))
1099 			break;
1100 
1101 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1102 		    sizeof(breq.ifbr_ifsname));
1103 		breq.ifbr_ifsflags = bif->bif_flags;
1104 		breq.ifbr_state = bif->bif_stp.bp_state;
1105 		breq.ifbr_priority = bif->bif_stp.bp_priority;
1106 		breq.ifbr_path_cost = bif->bif_stp.bp_path_cost;
1107 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xff;
1108 		error = copyout(&breq, bifc->ifbic_req + count, sizeof(breq));
1109 		if (error)
1110 			break;
1111 		count++;
1112 		len -= sizeof(breq);
1113 	}
1114 
1115 	bifc->ifbic_len = sizeof(breq) * count;
1116 	return (error);
1117 }
1118 
1119 static int
1120 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1121 {
1122 	struct ifbaconf *bac = arg;
1123 	struct bridge_rtnode *brt;
1124 	struct ifbareq bareq;
1125 	int count = 0, error = 0, len;
1126 
1127 	if (bac->ifbac_len == 0)
1128 		return (0);
1129 
1130 	len = bac->ifbac_len;
1131 	bzero(&bareq, sizeof(bareq));
1132 	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1133 		if (len < sizeof(bareq))
1134 			goto out;
1135 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1136 		    sizeof(bareq.ifba_ifsname));
1137 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1138 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1139 				time_uptime < brt->brt_expire)
1140 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1141 		else
1142 			bareq.ifba_expire = 0;
1143 		bareq.ifba_flags = brt->brt_flags;
1144 
1145 		error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq));
1146 		if (error)
1147 			goto out;
1148 		count++;
1149 		len -= sizeof(bareq);
1150 	}
1151 out:
1152 	bac->ifbac_len = sizeof(bareq) * count;
1153 	return (error);
1154 }
1155 
1156 static int
1157 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1158 {
1159 	struct ifbareq *req = arg;
1160 	struct bridge_iflist *bif;
1161 	int error;
1162 
1163 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1164 	if (bif == NULL)
1165 		return (ENOENT);
1166 
1167 	error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
1168 	    req->ifba_flags);
1169 
1170 	return (error);
1171 }
1172 
1173 static int
1174 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1175 {
1176 	struct ifbrparam *param = arg;
1177 
1178 	sc->sc_brttimeout = param->ifbrp_ctime;
1179 	return (0);
1180 }
1181 
1182 static int
1183 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1184 {
1185 	struct ifbrparam *param = arg;
1186 
1187 	param->ifbrp_ctime = sc->sc_brttimeout;
1188 	return (0);
1189 }
1190 
1191 static int
1192 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1193 {
1194 	struct ifbareq *req = arg;
1195 
1196 	return (bridge_rtdaddr(sc, req->ifba_dst));
1197 }
1198 
1199 static int
1200 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1201 {
1202 	struct ifbreq *req = arg;
1203 
1204 	bridge_rtflush(sc, req->ifbr_ifsflags);
1205 	return (0);
1206 }
1207 
1208 static int
1209 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1210 {
1211 	struct ifbrparam *param = arg;
1212 	struct bstp_state *bs = &sc->sc_stp;
1213 
1214 	param->ifbrp_prio = bs->bs_bridge_priority;
1215 	return (0);
1216 }
1217 
1218 static int
1219 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1220 {
1221 	struct ifbrparam *param = arg;
1222 	struct bstp_state *bs = &sc->sc_stp;
1223 
1224 	bs->bs_bridge_priority = param->ifbrp_prio;
1225 	bstp_reinit(bs);
1226 
1227 	return (0);
1228 }
1229 
1230 static int
1231 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1232 {
1233 	struct ifbrparam *param = arg;
1234 	struct bstp_state *bs = &sc->sc_stp;
1235 
1236 	param->ifbrp_hellotime = bs->bs_bridge_hello_time >> 8;
1237 	return (0);
1238 }
1239 
1240 static int
1241 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1242 {
1243 	struct ifbrparam *param = arg;
1244 	struct bstp_state *bs = &sc->sc_stp;
1245 
1246 	if (param->ifbrp_hellotime == 0)
1247 		return (EINVAL);
1248 	bs->bs_bridge_hello_time = param->ifbrp_hellotime << 8;
1249 	bstp_reinit(bs);
1250 
1251 	return (0);
1252 }
1253 
1254 static int
1255 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1256 {
1257 	struct ifbrparam *param = arg;
1258 	struct bstp_state *bs = &sc->sc_stp;
1259 
1260 	param->ifbrp_fwddelay = bs->bs_bridge_forward_delay >> 8;
1261 	return (0);
1262 }
1263 
1264 static int
1265 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1266 {
1267 	struct ifbrparam *param = arg;
1268 	struct bstp_state *bs = &sc->sc_stp;
1269 
1270 	if (param->ifbrp_fwddelay == 0)
1271 		return (EINVAL);
1272 	bs->bs_bridge_forward_delay = param->ifbrp_fwddelay << 8;
1273 	bstp_reinit(bs);
1274 
1275 	return (0);
1276 }
1277 
1278 static int
1279 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1280 {
1281 	struct ifbrparam *param = arg;
1282 	struct bstp_state *bs = &sc->sc_stp;
1283 
1284 	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1285 	return (0);
1286 }
1287 
1288 static int
1289 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1290 {
1291 	struct ifbrparam *param = arg;
1292 	struct bstp_state *bs = &sc->sc_stp;
1293 
1294 	if (param->ifbrp_maxage == 0)
1295 		return (EINVAL);
1296 	bs->bs_bridge_max_age = param->ifbrp_maxage << 8;
1297 	bstp_reinit(bs);
1298 
1299 	return (0);
1300 }
1301 
1302 static int
1303 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1304 {
1305 	struct ifbreq *req = arg;
1306 	struct bridge_iflist *bif;
1307 
1308 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1309 	if (bif == NULL)
1310 		return (ENOENT);
1311 
1312 	bif->bif_stp.bp_priority = req->ifbr_priority;
1313 	bstp_reinit(&sc->sc_stp);
1314 
1315 	return (0);
1316 }
1317 
1318 static int
1319 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1320 {
1321 	struct ifbreq *req = arg;
1322 	struct bridge_iflist *bif;
1323 
1324 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1325 	if (bif == NULL)
1326 		return (ENOENT);
1327 
1328 	bif->bif_stp.bp_path_cost = req->ifbr_path_cost;
1329 	bstp_reinit(&sc->sc_stp);
1330 
1331 	return (0);
1332 }
1333 
1334 static int
1335 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1336 {
1337 	struct ifbreq *req = arg;
1338 	struct bridge_iflist *bif = NULL;
1339 	struct ifnet *ifs;
1340 
1341 	ifs = ifunit(req->ifbr_ifsname);
1342 	if (ifs == NULL)
1343 		return (ENOENT);
1344 
1345 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1346 		if (ifs == bif->bif_ifp)
1347 			return (EBUSY);
1348 
1349 	if (ifs->if_bridge != NULL)
1350 		return (EBUSY);
1351 
1352 	switch (ifs->if_type) {
1353 		case IFT_ETHER:
1354 		case IFT_GIF:
1355 		case IFT_L2VLAN:
1356 			break;
1357 		default:
1358 			return (EINVAL);
1359 	}
1360 
1361 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1362 	if (bif == NULL)
1363 		return (ENOMEM);
1364 
1365 	bif->bif_ifp = ifs;
1366 	bif->bif_flags = IFBIF_SPAN;
1367 
1368 	LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1369 
1370 	return (0);
1371 }
1372 
1373 static int
1374 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1375 {
1376 	struct ifbreq *req = arg;
1377 	struct bridge_iflist *bif;
1378 	struct ifnet *ifs;
1379 
1380 	ifs = ifunit(req->ifbr_ifsname);
1381 	if (ifs == NULL)
1382 		return (ENOENT);
1383 
1384 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1385 		if (ifs == bif->bif_ifp)
1386 			break;
1387 
1388 	if (bif == NULL)
1389 		return (ENOENT);
1390 
1391 	bridge_delete_span(sc, bif);
1392 
1393 	return (0);
1394 }
1395 
1396 static int
1397 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1398 {
1399 	struct ifbropreq *req = arg;
1400 	struct bstp_port *root_port;
1401 
1402 	req->ifbop_maxage = sc->sc_stp.bs_max_age;
1403 	req->ifbop_hellotime = sc->sc_stp.bs_hello_time;
1404 	req->ifbop_fwddelay = sc->sc_stp.bs_forward_delay;
1405 
1406 	root_port = sc->sc_stp.bs_root_port;
1407 	if (root_port == NULL)
1408 		req->ifbop_root_port = 0;
1409 	else
1410 		req->ifbop_root_port = root_port->bp_ifp->if_index;
1411 
1412 	req->ifbop_root_path_cost = sc->sc_stp.bs_root_path_cost;
1413 	req->ifbop_designated_root = sc->sc_stp.bs_designated_root;
1414 	req->ifbop_last_tc_time.tv_sec = sc->sc_stp.bs_last_tc_time.tv_sec;
1415 	req->ifbop_last_tc_time.tv_usec = sc->sc_stp.bs_last_tc_time.tv_usec;
1416 
1417 	return (0);
1418 }
1419 
1420 static int
1421 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1422 {
1423 	struct ifbrparam *param = arg;
1424 
1425 	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1426 	return (0);
1427 }
1428 
1429 static int
1430 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1431 {
1432 	struct ifbpstpconf *bifstp = arg;
1433 	struct bridge_iflist *bif;
1434 	struct ifbpstpreq bpreq;
1435 	int count, len, error = 0;
1436 
1437 	count = 0;
1438 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1439 		if ((bif->bif_flags & IFBIF_STP) != 0)
1440 			count++;
1441 	}
1442 
1443 	if (bifstp->ifbpstp_len == 0) {
1444 		bifstp->ifbpstp_len = sizeof(bpreq) * count;
1445 		return (0);
1446 	}
1447 
1448 	count = 0;
1449 	len = bifstp->ifbpstp_len;
1450 	bzero(&bpreq, sizeof(bpreq));
1451 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1452 		if (len < sizeof(bpreq))
1453 			break;
1454 
1455 		if ((bif->bif_flags & IFBIF_STP) == 0)
1456 			continue;
1457 
1458 		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xff;
1459 		bpreq.ifbp_fwd_trans = bif->bif_stp.bp_forward_transitions;
1460 		bpreq.ifbp_design_cost = bif->bif_stp.bp_designated_cost;
1461 		bpreq.ifbp_design_port = bif->bif_stp.bp_designated_port;
1462 		bpreq.ifbp_design_bridge = bif->bif_stp.bp_designated_bridge;
1463 		bpreq.ifbp_design_root = bif->bif_stp.bp_designated_root;
1464 
1465 		error = copyout(&bpreq, bifstp->ifbpstp_req + count,
1466 				sizeof(bpreq));
1467 		if (error != 0)
1468 			break;
1469 
1470 		count++;
1471 		len -= sizeof(bpreq);
1472 	}
1473 
1474 	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1475 	return (error);
1476 }
1477 
1478 /*
1479  * bridge_ifdetach:
1480  *
1481  *	Detach an interface from a bridge.  Called when a member
1482  *	interface is detaching.
1483  */
1484 static void
1485 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1486 {
1487 	struct bridge_softc *sc = ifp->if_bridge;
1488 	struct bridge_iflist *bif;
1489 
1490 	/* Check if the interface is a bridge member */
1491 	if (sc != NULL) {
1492 		BRIDGE_LOCK(sc);
1493 
1494 		bif = bridge_lookup_member_if(sc, ifp);
1495 		if (bif != NULL)
1496 			bridge_delete_member(sc, bif, 1);
1497 
1498 		BRIDGE_UNLOCK(sc);
1499 		return;
1500 	}
1501 
1502 	/* Check if the interface is a span port */
1503 	mtx_lock(&bridge_list_mtx);
1504 	LIST_FOREACH(sc, &bridge_list, sc_list) {
1505 		BRIDGE_LOCK(sc);
1506 		LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1507 			if (ifp == bif->bif_ifp) {
1508 				bridge_delete_span(sc, bif);
1509 				break;
1510 			}
1511 
1512 		BRIDGE_UNLOCK(sc);
1513 	}
1514 	mtx_unlock(&bridge_list_mtx);
1515 }
1516 
1517 /*
1518  * bridge_init:
1519  *
1520  *	Initialize a bridge interface.
1521  */
1522 static void
1523 bridge_init(void *xsc)
1524 {
1525 	struct bridge_softc *sc = (struct bridge_softc *)xsc;
1526 	struct ifnet *ifp = sc->sc_ifp;
1527 
1528 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
1529 		return;
1530 
1531 	BRIDGE_LOCK(sc);
1532 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
1533 	    bridge_timer, sc);
1534 
1535 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
1536 	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
1537 
1538 	BRIDGE_UNLOCK(sc);
1539 }
1540 
1541 /*
1542  * bridge_stop:
1543  *
1544  *	Stop the bridge interface.
1545  */
1546 static void
1547 bridge_stop(struct ifnet *ifp, int disable)
1548 {
1549 	struct bridge_softc *sc = ifp->if_softc;
1550 
1551 	BRIDGE_LOCK_ASSERT(sc);
1552 
1553 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1554 		return;
1555 
1556 	callout_stop(&sc->sc_brcallout);
1557 	bstp_stop(&sc->sc_stp);
1558 
1559 	bridge_rtflush(sc, IFBF_FLUSHDYN);
1560 
1561 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1562 }
1563 
1564 /*
1565  * bridge_enqueue:
1566  *
1567  *	Enqueue a packet on a bridge member interface.
1568  *
1569  */
1570 static void
1571 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
1572 {
1573 	int len, err = 0;
1574 	short mflags;
1575 	struct mbuf *m0;
1576 
1577 	len = m->m_pkthdr.len;
1578 	mflags = m->m_flags;
1579 
1580 	/* We may be sending fragments, so traverse the mbuf packet chain. */
1581 	for (; m; m = m0) {
1582 		m0 = m->m_nextpkt;
1583 		m->m_nextpkt = NULL;
1584 
1585 		if (err == 0)
1586 			IFQ_ENQUEUE(&dst_ifp->if_snd, m, err);
1587 	}
1588 
1589 	if (err == 0) {
1590 
1591 		sc->sc_ifp->if_opackets++;
1592 		sc->sc_ifp->if_obytes += len;
1593 
1594 		dst_ifp->if_obytes += len;
1595 
1596 		if (mflags & M_MCAST) {
1597 			sc->sc_ifp->if_omcasts++;
1598 			dst_ifp->if_omcasts++;
1599 		}
1600 	}
1601 
1602 	if ((dst_ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
1603 		(*dst_ifp->if_start)(dst_ifp);
1604 }
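/*
 * Note that when bridge_pfil() has had to fragment an IP packet, the
 * fragments reach bridge_enqueue() chained through m_nextpkt, which is
 * why the loop above walks that list instead of assuming one packet.
 */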
1605 
1606 /*
1607  * bridge_dummynet:
1608  *
1609  * 	Receive a queued packet from dummynet and pass it on to the output
1610  * 	interface.
1611  *
1612  *	The mbuf has the Ethernet header already attached.
1613  */
1614 static void
1615 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
1616 {
1617 	struct bridge_softc *sc;
1618 
1619 	sc = ifp->if_bridge;
1620 
1621 	/*
1622 	 * The packet didn't originate from a member interface.  This should only
1623 	 * ever happen if a member interface is removed while packets are
1624 	 * queued for it.
1625 	 */
1626 	if (sc == NULL) {
1627 		m_freem(m);
1628 		return;
1629 	}
1630 
1631 	if (PFIL_HOOKED(&inet_pfil_hook)
1632 #ifdef INET6
1633 	    || PFIL_HOOKED(&inet6_pfil_hook)
1634 #endif
1635 	    ) {
1636 		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
1637 			return;
1638 		if (m == NULL)
1639 			return;
1640 	}
1641 
1642 	bridge_enqueue(sc, ifp, m);
1643 }
1644 
1645 /*
1646  * bridge_output:
1647  *
1648  *	Send output from a bridge member interface.  This
1649  *	performs the bridging function for locally originated
1650  *	packets.
1651  *
1652  *	The mbuf has the Ethernet header already attached.  We must
1653  *	enqueue or free the mbuf before returning.
1654  */
1655 static int
1656 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
1657     struct rtentry *rt)
1658 {
1659 	struct ether_header *eh;
1660 	struct ifnet *dst_if;
1661 	struct bridge_softc *sc;
1662 
1663 	if (m->m_len < ETHER_HDR_LEN) {
1664 		m = m_pullup(m, ETHER_HDR_LEN);
1665 		if (m == NULL)
1666 			return (0);
1667 	}
1668 
1669 	eh = mtod(m, struct ether_header *);
1670 	sc = ifp->if_bridge;
1671 
1672 	BRIDGE_LOCK(sc);
1673 
1674 	/*
1675 	 * If bridge is down, but the original output interface is up,
1676 	 * go ahead and send out that interface.  Otherwise, the packet
1677 	 * is dropped below.
1678 	 */
1679 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1680 		dst_if = ifp;
1681 		goto sendunicast;
1682 	}
1683 
1684 	/*
1685 	 * If the packet is a multicast, or we don't know a better way to
1686 	 * get there, send to all interfaces.
1687 	 */
1688 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
1689 		dst_if = NULL;
1690 	else
1691 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1692 	if (dst_if == NULL) {
1693 		struct bridge_iflist *bif;
1694 		struct mbuf *mc;
1695 		int error = 0, used = 0;
1696 
1697 		bridge_span(sc, m);
1698 
1699 		BRIDGE_LOCK2REF(sc, error);
1700 		if (error) {
1701 			m_freem(m);
1702 			return (0);
1703 		}
1704 
1705 		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1706 			dst_if = bif->bif_ifp;
1707 
1708 			if (dst_if->if_type == IFT_GIF)
1709 				continue;
1710 			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
1711 				continue;
1712 
1713 			/*
1714 			 * If this is not the original output interface,
1715 			 * and the interface is participating in spanning
1716 			 * tree, make sure the port is in a state that
1717 			 * allows forwarding.
1718 			 */
1719 			if (dst_if != ifp &&
1720 			    (bif->bif_flags & IFBIF_STP) != 0) {
1721 				switch (bif->bif_stp.bp_state) {
1722 				case BSTP_IFSTATE_BLOCKING:
1723 				case BSTP_IFSTATE_LISTENING:
1724 				case BSTP_IFSTATE_DISABLED:
1725 					continue;
1726 				}
1727 			}
1728 
1729 			if (LIST_NEXT(bif, bif_next) == NULL) {
1730 				used = 1;
1731 				mc = m;
1732 			} else {
1733 				mc = m_copypacket(m, M_DONTWAIT);
1734 				if (mc == NULL) {
1735 					sc->sc_ifp->if_oerrors++;
1736 					continue;
1737 				}
1738 			}
1739 
1740 			bridge_enqueue(sc, dst_if, mc);
1741 		}
1742 		if (used == 0)
1743 			m_freem(m);
1744 		BRIDGE_UNREF(sc);
1745 		return (0);
1746 	}
1747 
1748 sendunicast:
1749 	/*
1750 	 * XXX Spanning tree consideration here?
1751 	 */
1752 
1753 	bridge_span(sc, m);
1754 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1755 		m_freem(m);
1756 		BRIDGE_UNLOCK(sc);
1757 		return (0);
1758 	}
1759 
1760 	BRIDGE_UNLOCK(sc);
1761 	bridge_enqueue(sc, dst_if, m);
1762 	return (0);
1763 }
1764 
1765 /*
1766  * bridge_start:
1767  *
1768  *	Start output on a bridge.
1769  *
1770  */
1771 static void
1772 bridge_start(struct ifnet *ifp)
1773 {
1774 	struct bridge_softc *sc;
1775 	struct mbuf *m;
1776 	struct ether_header *eh;
1777 	struct ifnet *dst_if;
1778 
1779 	sc = ifp->if_softc;
1780 
1781 	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1782 	for (;;) {
1783 		IFQ_DEQUEUE(&ifp->if_snd, m);
1784 		if (m == NULL)
1785 			break;
1786 		BPF_MTAP(ifp, m);
1787 
1788 		eh = mtod(m, struct ether_header *);
1789 		dst_if = NULL;
1790 
1791 		BRIDGE_LOCK(sc);
1792 		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1793 			dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1794 		}
1795 
1796 		if (dst_if == NULL)
1797 			bridge_broadcast(sc, ifp, m, 0);
1798 		else {
1799 			BRIDGE_UNLOCK(sc);
1800 			bridge_enqueue(sc, dst_if, m);
1801 		}
1802 	}
1803 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1804 }
1805 
1806 /*
1807  * bridge_forward:
1808  *
1809  *	The forwarding function of the bridge.
1810  *
1811  *	NOTE: Releases the lock on return.
1812  */
1813 static void
1814 bridge_forward(struct bridge_softc *sc, struct mbuf *m)
1815 {
1816 	struct bridge_iflist *bif;
1817 	struct ifnet *src_if, *dst_if, *ifp;
1818 	struct ether_header *eh;
1819 
1820 	src_if = m->m_pkthdr.rcvif;
1821 	ifp = sc->sc_ifp;
1822 
1823 	sc->sc_ifp->if_ipackets++;
1824 	sc->sc_ifp->if_ibytes += m->m_pkthdr.len;
1825 
1826 	/*
1827 	 * Look up the bridge_iflist.
1828 	 */
1829 	bif = bridge_lookup_member_if(sc, src_if);
1830 	if (bif == NULL) {
1831 		/* Interface is not a bridge member (anymore?) */
1832 		BRIDGE_UNLOCK(sc);
1833 		m_freem(m);
1834 		return;
1835 	}
1836 
1837 	if (bif->bif_flags & IFBIF_STP) {
1838 		switch (bif->bif_stp.bp_state) {
1839 		case BSTP_IFSTATE_BLOCKING:
1840 		case BSTP_IFSTATE_LISTENING:
1841 		case BSTP_IFSTATE_DISABLED:
1842 			BRIDGE_UNLOCK(sc);
1843 			m_freem(m);
1844 			return;
1845 		}
1846 	}
1847 
1848 	eh = mtod(m, struct ether_header *);
1849 
1850 	/*
1851 	 * If the interface is learning, and the source
1852 	 * address is valid and not multicast, record
1853 	 * the address.
1854 	 */
1855 	if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
1856 	    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
1857 	    (eh->ether_shost[0] == 0 &&
1858 	     eh->ether_shost[1] == 0 &&
1859 	     eh->ether_shost[2] == 0 &&
1860 	     eh->ether_shost[3] == 0 &&
1861 	     eh->ether_shost[4] == 0 &&
1862 	     eh->ether_shost[5] == 0) == 0) {
1863 		(void) bridge_rtupdate(sc, eh->ether_shost,
1864 		    src_if, 0, IFBAF_DYNAMIC);
1865 	}
1866 
1867 	if ((bif->bif_flags & IFBIF_STP) != 0 &&
1868 	    bif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) {
1869 		m_freem(m);
1870 		BRIDGE_UNLOCK(sc);
1871 		return;
1872 	}
1873 
1874 	/*
1875 	 * At this point, the port either doesn't participate
1876 	 * in spanning tree or it is in the forwarding state.
1877 	 */
1878 
1879 	/*
1880 	 * If the packet is unicast, destined for someone on
1881 	 * "this" side of the bridge, drop it.
1882 	 */
1883 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
1884 		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
1885 		if (src_if == dst_if) {
1886 			BRIDGE_UNLOCK(sc);
1887 			m_freem(m);
1888 			return;
1889 		}
1890 	} else {
1891 		/* ...forward it to all interfaces. */
1892 		sc->sc_ifp->if_imcasts++;
1893 		dst_if = NULL;
1894 	}
1895 
1896 	/*
1897 	 * If we have a destination interface which is a member of our bridge,
1898 	 * OR this is a unicast packet, push it through the bpf(4) machinery.
1899 	 * For broadcast or multicast packets, don't bother because it will
1900 	 * be reinjected into ether_input. We do this before we pass the packets
1901 	 * through the pfil(9) framework, as it is possible that pfil(9) will
1902 	 * drop the packet, or possibly modify it, making it difficult to debug
1903 	 * firewall issues on the bridge.
1904 	 */
1905 	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
1906 		BPF_MTAP(ifp, m);
1907 
1908 	/* run the packet filter */
1909 	if (PFIL_HOOKED(&inet_pfil_hook)
1910 #ifdef INET6
1911 	    || PFIL_HOOKED(&inet6_pfil_hook)
1912 #endif
1913 	    ) {
1914 		BRIDGE_UNLOCK(sc);
1915 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
1916 			return;
1917 		if (m == NULL)
1918 			return;
1919 		BRIDGE_LOCK(sc);
1920 	}
1921 
1922 	if (dst_if == NULL) {
1923 		bridge_broadcast(sc, src_if, m, 1);
1924 		return;
1925 	}
1926 
1927 	/*
1928 	 * At this point, we're dealing with a unicast frame
1929 	 * going to a different interface.
1930 	 */
1931 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1932 		BRIDGE_UNLOCK(sc);
1933 		m_freem(m);
1934 		return;
1935 	}
1936 	bif = bridge_lookup_member_if(sc, dst_if);
1937 	if (bif == NULL) {
1938 		/* Not a member of the bridge (anymore?) */
1939 		BRIDGE_UNLOCK(sc);
1940 		m_freem(m);
1941 		return;
1942 	}
1943 
1944 	if (bif->bif_flags & IFBIF_STP) {
1945 		switch (bif->bif_stp.bp_state) {
1946 		case BSTP_IFSTATE_DISABLED:
1947 		case BSTP_IFSTATE_BLOCKING:
1948 			BRIDGE_UNLOCK(sc);
1949 			m_freem(m);
1950 			return;
1951 		}
1952 	}
1953 
1954 	BRIDGE_UNLOCK(sc);
1955 
1956 	if (PFIL_HOOKED(&inet_pfil_hook)
1957 #ifdef INET6
1958 	    || PFIL_HOOKED(&inet6_pfil_hook)
1959 #endif
1960 	    ) {
1961 		if (bridge_pfil(&m, sc->sc_ifp, dst_if, PFIL_OUT) != 0)
1962 			return;
1963 		if (m == NULL)
1964 			return;
1965 	}
1966 
1967 	bridge_enqueue(sc, dst_if, m);
1968 }
1969 
1970 /*
1971  * bridge_input:
1972  *
1973  *	Receive input from a member interface.  Queue the packet for
1974  *	bridging if it is not for us.
1975  */
1976 static struct mbuf *
1977 bridge_input(struct ifnet *ifp, struct mbuf *m)
1978 {
1979 	struct bridge_softc *sc = ifp->if_bridge;
1980 	struct bridge_iflist *bif;
1981 	struct ifnet *bifp;
1982 	struct ether_header *eh;
1983 	struct mbuf *mc, *mc2;
1984 
1985 	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
1986 		return (m);
1987 
1988 	bifp = sc->sc_ifp;
1989 
1990 	/*
1991 	 * Implement support for bridge monitoring. If this flag has been
1992 	 * set on this interface, discard the packet once we push it through
1993 	 * the bpf(4) machinery, but before we do, increment the byte and
1994 	 * packet counters associated with this interface.
1995 	 */
1996 	if ((bifp->if_flags & IFF_MONITOR) != 0) {
1997 		m->m_pkthdr.rcvif  = bifp;
1998 		BPF_MTAP(bifp, m);
1999 		bifp->if_ipackets++;
2000 		bifp->if_ibytes += m->m_pkthdr.len;
2001 		m_free(m);
2002 		return (NULL);
2003 	}
2004 	BRIDGE_LOCK(sc);
2005 	bif = bridge_lookup_member_if(sc, ifp);
2006 	if (bif == NULL) {
2007 		BRIDGE_UNLOCK(sc);
2008 		return (m);
2009 	}
2010 
2011 	eh = mtod(m, struct ether_header *);
2012 
2013 	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp),
2014 	    ETHER_ADDR_LEN) == 0) {
2015 		/*
2016 		 * If the packet is for us, set the packet's receiving
2017 		 * interface to the bridge and return it to ether_input
2018 		 * for local processing.
2019 		 */
2020 
2021 		/* Mark the packet as arriving on the bridge interface */
2022 		m->m_pkthdr.rcvif = bifp;
2023 		BPF_MTAP(bifp, m);
2024 		bifp->if_ipackets++;
2025 
2026 		BRIDGE_UNLOCK(sc);
2027 		return (m);
2028 	}
2029 
2030 	bridge_span(sc, m);
2031 
2032 	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
2033 		/* Tap off 802.1D packets; they do not get forwarded. */
2034 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2035 		    ETHER_ADDR_LEN) == 0) {
2036 			m = bstp_input(&bif->bif_stp, ifp, m);
2037 			if (m == NULL) {
2038 				BRIDGE_UNLOCK(sc);
2039 				return (NULL);
2040 			}
2041 		}
2042 
2043 		if (bif->bif_flags & IFBIF_STP) {
2044 			switch (bif->bif_stp.bp_state) {
2045 			case BSTP_IFSTATE_BLOCKING:
2046 			case BSTP_IFSTATE_LISTENING:
2047 			case BSTP_IFSTATE_DISABLED:
2048 				BRIDGE_UNLOCK(sc);
2049 				return (m);
2050 			}
2051 		}
2052 
2053 		if (bcmp(etherbroadcastaddr, eh->ether_dhost,
2054 		    sizeof(etherbroadcastaddr)) == 0)
2055 			m->m_flags |= M_BCAST;
2056 		else
2057 			m->m_flags |= M_MCAST;
2058 
2059 		/*
2060 		 * Make a deep copy of the packet and enqueue the copy
2061 		 * for bridge processing; return the original packet for
2062 		 * local processing.
2063 		 */
2064 		mc = m_dup(m, M_DONTWAIT);
2065 		if (mc == NULL) {
2066 			BRIDGE_UNLOCK(sc);
2067 			return (m);
2068 		}
2069 
2070 		/* Perform the bridge forwarding function with the copy. */
2071 		bridge_forward(sc, mc);
2072 
2073 		/*
2074 		 * Reinject the mbuf as arriving on the bridge so we have a
2075 		 * chance at claiming multicast packets. We can not loop back
2076 		 * here from ether_input as a bridge is never a member of a
2077 		 * bridge.
2078 		 */
2079 		KASSERT(bifp->if_bridge == NULL,
2080 		    ("loop created in bridge_input"));
2081 		mc2 = m_dup(m, M_DONTWAIT);
2082 		if (mc2 != NULL) {
2083 			/* Keep the layer3 header aligned */
2084 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2085 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2086 		}
2087 		if (mc2 != NULL) {
2088 			mc2->m_pkthdr.rcvif = bifp;
2089 			(*bifp->if_input)(bifp, mc2);
2090 		}
2091 
2092 		/* Return the original packet for local processing. */
2093 		return (m);
2094 	}
2095 
2096 	if (bif->bif_flags & IFBIF_STP) {
2097 		switch (bif->bif_stp.bp_state) {
2098 		case BSTP_IFSTATE_BLOCKING:
2099 		case BSTP_IFSTATE_LISTENING:
2100 		case BSTP_IFSTATE_DISABLED:
2101 			BRIDGE_UNLOCK(sc);
2102 			return (m);
2103 		}
2104 	}
2105 
2106 	/*
2107 	 * Unicast.  Make sure it's not for us.
2108 	 */
2109 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
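		/* Skip gif(4) tunnel members; they have no Ethernet address. */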
2110 		if (bif->bif_ifp->if_type == IFT_GIF)
2111 			continue;
2112 		/* It is destined for us. */
2113 		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_dhost,
2114 		    ETHER_ADDR_LEN) == 0
2115 #ifdef DEV_CARP
2116 		    || (bif->bif_ifp->if_carp
2117 			&& carp_forus(bif->bif_ifp->if_carp, eh->ether_dhost))
2118 #endif
2119 		    ) {
2120 			if (bif->bif_flags & IFBIF_LEARNING)
2121 				(void) bridge_rtupdate(sc,
2122 				    eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
2123 			m->m_pkthdr.rcvif = bif->bif_ifp;
2124 			BRIDGE_UNLOCK(sc);
2125 			return (m);
2126 		}
2127 
2128 		/* We just received a packet that we sent out. */
2129 		if (memcmp(IF_LLADDR(bif->bif_ifp), eh->ether_shost,
2130 		    ETHER_ADDR_LEN) == 0
2131 #ifdef DEV_CARP
2132 		    || (bif->bif_ifp->if_carp
2133 			&& carp_forus(bif->bif_ifp->if_carp, eh->ether_shost))
2134 #endif
2135 		    ) {
2136 			BRIDGE_UNLOCK(sc);
2137 			m_freem(m);
2138 			return (NULL);
2139 		}
2140 	}
2141 
2142 	/* Perform the bridge forwarding function. */
2143 	bridge_forward(sc, m);
2144 
2145 	return (NULL);
2146 }
2147 
2148 /*
2149  * bridge_broadcast:
2150  *
2151  *	Send a frame to all interfaces that are members of
2152  *	the bridge, except for the one on which the packet
2153  *	arrived.
2154  *
2155  *	NOTE: Releases the lock on return.
2156  */
2157 static void
2158 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2159     struct mbuf *m, int runfilt)
2160 {
2161 	struct bridge_iflist *bif;
2162 	struct mbuf *mc;
2163 	struct ifnet *dst_if;
2164 	int error = 0, used = 0, i;
2165 
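	/*
	 * Trade the bridge lock for a reference on the member list so
	 * it cannot change underneath us while we walk it and transmit.
	 */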
2166 	BRIDGE_LOCK2REF(sc, error);
2167 	if (error) {
2168 		m_freem(m);
2169 		return;
2170 	}
2171 
2172 	/* Filter on the bridge interface before broadcasting */
2173 	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
2174 #ifdef INET6
2175 	    || PFIL_HOOKED(&inet6_pfil_hook)
2176 #endif
2177 	    )) {
2178 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2179 			goto out;
2180 		if (m == NULL)
2181 			goto out;
2182 	}
2183 
2184 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2185 		dst_if = bif->bif_ifp;
2186 		if (dst_if == src_if)
2187 			continue;
2188 
2189 		if (bif->bif_flags & IFBIF_STP) {
2190 			switch (bif->bif_stp.bp_state) {
2191 			case BSTP_IFSTATE_BLOCKING:
2192 			case BSTP_IFSTATE_DISABLED:
2193 				continue;
2194 			}
2195 		}
2196 
2197 		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 &&
2198 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2199 			continue;
2200 
2201 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2202 			continue;
2203 
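		/*
		 * The last member in the list gets the original mbuf;
		 * every other member gets a duplicate.  'used' records
		 * that the original has been handed off so it is not
		 * freed again below.
		 */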
2204 		if (LIST_NEXT(bif, bif_next) == NULL) {
2205 			mc = m;
2206 			used = 1;
2207 		} else {
2208 			mc = m_dup(m, M_DONTWAIT);
2209 			if (mc == NULL) {
2210 				sc->sc_ifp->if_oerrors++;
2211 				continue;
2212 			}
2213 		}
2214 
2215 		/*
2216 		 * Filter on the output interface. Pass a NULL bridge interface
2217 		 * pointer so we do not redundantly filter on the bridge for
2218 		 * each interface we broadcast on.
2219 		 */
2220 		if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
2221 #ifdef INET6
2222 		    || PFIL_HOOKED(&inet6_pfil_hook)
2223 #endif
2224 		    )) {
2225 			if (used == 0) {
2226 				/* Keep the layer3 header aligned */
2227 				i = min(mc->m_pkthdr.len, max_protohdr);
2228 				mc = m_copyup(mc, i, ETHER_ALIGN);
2229 				if (mc == NULL) {
2230 					sc->sc_ifp->if_oerrors++;
2231 					continue;
2232 				}
2233 			}
2234 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2235 				continue;
2236 			if (mc == NULL)
2237 				continue;
2238 		}
2239 
2240 		bridge_enqueue(sc, dst_if, mc);
2241 	}
2242 	if (used == 0)
2243 		m_freem(m);
2244 
2245 out:
2246 	BRIDGE_UNREF(sc);
2247 }
2248 
2249 /*
2250  * bridge_span:
2251  *
2252  *	Duplicate a packet out one or more interfaces that are in span
2253  *	mode; the original mbuf is left unmodified.
2254  */
2255 static void
2256 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2257 {
2258 	struct bridge_iflist *bif;
2259 	struct ifnet *dst_if;
2260 	struct mbuf *mc;
2261 
2262 	if (LIST_EMPTY(&sc->sc_spanlist))
2263 		return;
2264 
2265 	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2266 		dst_if = bif->bif_ifp;
2267 
2268 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2269 			continue;
2270 
2271 		mc = m_copypacket(m, M_DONTWAIT);
2272 		if (mc == NULL) {
2273 			sc->sc_ifp->if_oerrors++;
2274 			continue;
2275 		}
2276 
2277 		bridge_enqueue(sc, dst_if, mc);
2278 	}
2279 }
2280 
2281 /*
2282  * bridge_rtupdate:
2283  *
2284  *	Add a bridge routing entry.
2285  */
2286 static int
2287 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2288     struct ifnet *dst_if, int setflags, uint8_t flags)
2289 {
2290 	struct bridge_rtnode *brt;
2291 	int error;
2292 
2293 	BRIDGE_LOCK_ASSERT(sc);
2294 
2295 	/*
2296 	 * A route for this destination might already exist.  If so,
2297 	 * update it, otherwise create a new one.
2298 	 */
2299 	if ((brt = bridge_rtnode_lookup(sc, dst)) == NULL) {
2300 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2301 			sc->sc_brtexceeded++;
2302 			return (ENOSPC);
2303 		}
2304 
2305 		/*
2306 		 * Allocate a new bridge forwarding node, and
2307 		 * initialize the expiration time and Ethernet
2308 		 * address.
2309 		 */
2310 		brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2311 		if (brt == NULL)
2312 			return (ENOMEM);
2313 
2314 		brt->brt_flags = IFBAF_DYNAMIC;
2315 		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2316 
2317 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2318 			uma_zfree(bridge_rtnode_zone, brt);
2319 			return (error);
2320 		}
2321 	}
2322 
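	/*
	 * A dynamic entry follows the station to whichever member port
	 * it was last seen on; its expiration timer is rearmed only
	 * when the caller marks the update as dynamic.
	 */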
2323 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2324 		brt->brt_ifp = dst_if;
2325 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2326 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
2327 	if (setflags)
2328 		brt->brt_flags = flags;
2329 
2330 	return (0);
2331 }
2332 
2333 /*
2334  * bridge_rtlookup:
2335  *
2336  *	Look up the destination interface for an address.
2337  */
2338 static struct ifnet *
2339 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
2340 {
2341 	struct bridge_rtnode *brt;
2342 
2343 	BRIDGE_LOCK_ASSERT(sc);
2344 
2345 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2346 		return (NULL);
2347 
2348 	return (brt->brt_ifp);
2349 }
2350 
2351 /*
2352  * bridge_rttrim:
2353  *
2354  *	Trim the routing table so that we have a number
2355  *	of routing entries less than or equal to the
2356  *	maximum number.
2357  */
2358 static void
2359 bridge_rttrim(struct bridge_softc *sc)
2360 {
2361 	struct bridge_rtnode *brt, *nbrt;
2362 
2363 	BRIDGE_LOCK_ASSERT(sc);
2364 
2365 	/* Make sure we actually need to do this. */
2366 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2367 		return;
2368 
2369 	/* Force an aging cycle; this might trim enough addresses. */
2370 	bridge_rtage(sc);
2371 	if (sc->sc_brtcnt <= sc->sc_brtmax)
2372 		return;
2373 
2374 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2375 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2376 			bridge_rtnode_destroy(sc, brt);
2377 			if (sc->sc_brtcnt <= sc->sc_brtmax)
2378 				return;
2379 		}
2380 	}
2381 }
2382 
2383 /*
2384  * bridge_timer:
2385  *
2386  *	Aging timer for the bridge.
2387  */
2388 static void
2389 bridge_timer(void *arg)
2390 {
2391 	struct bridge_softc *sc = arg;
2392 
2393 	BRIDGE_LOCK_ASSERT(sc);
2394 
2395 	bridge_rtage(sc);
2396 
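	/* Reschedule ourselves while the bridge is still running. */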
2397 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
2398 		callout_reset(&sc->sc_brcallout,
2399 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
2400 }
2401 
2402 /*
2403  * bridge_rtage:
2404  *
2405  *	Perform an aging cycle.
2406  */
2407 static void
2408 bridge_rtage(struct bridge_softc *sc)
2409 {
2410 	struct bridge_rtnode *brt, *nbrt;
2411 
2412 	BRIDGE_LOCK_ASSERT(sc);
2413 
2414 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2415 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2416 			if (time_uptime >= brt->brt_expire)
2417 				bridge_rtnode_destroy(sc, brt);
2418 		}
2419 	}
2420 }
2421 
2422 /*
2423  * bridge_rtflush:
2424  *
2425  *	Remove all dynamic addresses from the bridge.
2426  */
2427 static void
2428 bridge_rtflush(struct bridge_softc *sc, int full)
2429 {
2430 	struct bridge_rtnode *brt, *nbrt;
2431 
2432 	BRIDGE_LOCK_ASSERT(sc);
2433 
2434 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2435 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2436 			bridge_rtnode_destroy(sc, brt);
2437 	}
2438 }
2439 
2440 /*
2441  * bridge_rtdaddr:
2442  *
2443  *	Remove an address from the table.
2444  */
2445 static int
2446 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr)
2447 {
2448 	struct bridge_rtnode *brt;
2449 
2450 	BRIDGE_LOCK_ASSERT(sc);
2451 
2452 	if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL)
2453 		return (ENOENT);
2454 
2455 	bridge_rtnode_destroy(sc, brt);
2456 	return (0);
2457 }
2458 
2459 /*
2460  * bridge_rtdelete:
2461  *
2462  *	Delete routes to a specific member interface.
2463  */
2464 static void
2465 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2466 {
2467 	struct bridge_rtnode *brt, *nbrt;
2468 
2469 	BRIDGE_LOCK_ASSERT(sc);
2470 
2471 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2472 		if (brt->brt_ifp == ifp && (full ||
2473 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2474 			bridge_rtnode_destroy(sc, brt);
2475 	}
2476 }
2477 
2478 /*
2479  * bridge_rtable_init:
2480  *
2481  *	Initialize the route table for this bridge.
2482  */
2483 static int
2484 bridge_rtable_init(struct bridge_softc *sc)
2485 {
2486 	int i;
2487 
2488 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2489 	    M_DEVBUF, M_NOWAIT);
2490 	if (sc->sc_rthash == NULL)
2491 		return (ENOMEM);
2492 
2493 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2494 		LIST_INIT(&sc->sc_rthash[i]);
2495 
2496 	sc->sc_rthash_key = arc4random();
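	/* Randomize the hash key so bucket placement is not predictable. */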
2497 
2498 	LIST_INIT(&sc->sc_rtlist);
2499 
2500 	return (0);
2501 }
2502 
2503 /*
2504  * bridge_rtable_fini:
2505  *
2506  *	Deconstruct the route table for this bridge.
2507  */
2508 static void
2509 bridge_rtable_fini(struct bridge_softc *sc)
2510 {
2511 
2512 	free(sc->sc_rthash, M_DEVBUF);
2513 }
2514 
2515 /*
2516  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2517  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
2518  */
2519 #define	mix(a, b, c)							\
2520 do {									\
2521 	a -= b; a -= c; a ^= (c >> 13);					\
2522 	b -= c; b -= a; b ^= (a << 8);					\
2523 	c -= a; c -= b; c ^= (b >> 13);					\
2524 	a -= b; a -= c; a ^= (c >> 12);					\
2525 	b -= c; b -= a; b ^= (a << 16);					\
2526 	c -= a; c -= b; c ^= (b >> 5);					\
2527 	a -= b; a -= c; a ^= (c >> 3);					\
2528 	b -= c; b -= a; b ^= (a << 10);					\
2529 	c -= a; c -= b; c ^= (b >> 15);					\
2530 } while (/*CONSTCOND*/0)
2531 
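/*
 * Fold the six octets of the Ethernet address into two of the 32-bit
 * words, mix them with the per-bridge random key, and mask the result
 * down to an index into the hash bucket array.
 */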
2532 static __inline uint32_t
2533 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
2534 {
2535 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
2536 
2537 	b += addr[5] << 8;
2538 	b += addr[4];
2539 	a += addr[3] << 24;
2540 	a += addr[2] << 16;
2541 	a += addr[1] << 8;
2542 	a += addr[0];
2543 
2544 	mix(a, b, c);
2545 
2546 	return (c & BRIDGE_RTHASH_MASK);
2547 }
2548 
2549 #undef mix
2550 
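/*
 * Compare two Ethernet addresses byte by byte, returning a memcmp(3)
 * style result.  The ordering is used to keep hash chains sorted.
 */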
2551 static int
2552 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
2553 {
2554 	int i, d;
2555 
2556 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
2557 		d = ((int)a[i]) - ((int)b[i]);
2558 	}
2559 
2560 	return (d);
2561 }
2562 
2563 /*
2564  * bridge_rtnode_lookup:
2565  *
2566  *	Look up a bridge route node for the specified destination.
2567  */
2568 static struct bridge_rtnode *
2569 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
2570 {
2571 	struct bridge_rtnode *brt;
2572 	uint32_t hash;
2573 	int dir;
2574 
2575 	BRIDGE_LOCK_ASSERT(sc);
2576 
2577 	hash = bridge_rthash(sc, addr);
2578 	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
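		/*
		 * Hash chains are kept sorted by bridge_rtnode_insert(),
		 * so the search can stop as soon as the address compares
		 * greater than the current entry.
		 */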
2579 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
2580 		if (dir == 0)
2581 			return (brt);
2582 		if (dir > 0)
2583 			return (NULL);
2584 	}
2585 
2586 	return (NULL);
2587 }
2588 
2589 /*
2590  * bridge_rtnode_insert:
2591  *
2592  *	Insert the specified bridge node into the route table.  We
2593  *	assume the entry is not already in the table.
2594  */
2595 static int
2596 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
2597 {
2598 	struct bridge_rtnode *lbrt;
2599 	uint32_t hash;
2600 	int dir;
2601 
2602 	BRIDGE_LOCK_ASSERT(sc);
2603 
2604 	hash = bridge_rthash(sc, brt->brt_addr);
2605 
2606 	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
2607 	if (lbrt == NULL) {
2608 		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
2609 		goto out;
2610 	}
2611 
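	/*
	 * Walk the chain and insert the new node so the chain stays
	 * sorted, which lets bridge_rtnode_lookup() terminate early.
	 * A duplicate address is reported as EEXIST.
	 */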
2612 	do {
2613 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
2614 		if (dir == 0)
2615 			return (EEXIST);
2616 		if (dir > 0) {
2617 			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
2618 			goto out;
2619 		}
2620 		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
2621 			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
2622 			goto out;
2623 		}
2624 		lbrt = LIST_NEXT(lbrt, brt_hash);
2625 	} while (lbrt != NULL);
2626 
2627 #ifdef DIAGNOSTIC
2628 	panic("bridge_rtnode_insert: impossible");
2629 #endif
2630 
2631 out:
2632 	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
2633 	sc->sc_brtcnt++;
2634 
2635 	return (0);
2636 }
2637 
2638 /*
2639  * bridge_rtnode_destroy:
2640  *
2641  *	Destroy a bridge rtnode.
2642  */
2643 static void
2644 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
2645 {
2646 	BRIDGE_LOCK_ASSERT(sc);
2647 
2648 	LIST_REMOVE(brt, brt_hash);
2649 
2650 	LIST_REMOVE(brt, brt_list);
2651 	sc->sc_brtcnt--;
2652 	uma_zfree(bridge_rtnode_zone, brt);
2653 }
2654 
2655 /*
2656  * bridge_state_change:
2657  *
2658  *	Callback from the bridgestp code when a port changes states.
2659  */
2660 static void
2661 bridge_state_change(struct ifnet *ifp, int state)
2662 {
2663 	struct bridge_softc *sc = ifp->if_bridge;
2664 	static const char *stpstates[] = {
2665 		"disabled",
2666 		"listening",
2667 		"learning",
2668 		"forwarding",
2669 		"blocking",
2670 	};
2671 
2672 	if (log_stp)
2673 		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
2674 		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
2675 
2676 	/* if the port is blocking then remove any routes to it */
2677 	switch (state) {
2678 		case BSTP_IFSTATE_DISABLED:
2679 		case BSTP_IFSTATE_BLOCKING:
2680 			BRIDGE_LOCK(sc);
2681 			bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
2682 			BRIDGE_UNLOCK(sc);
2683 	}
2684 }
2685 
2686 /*
2687  * Send bridge packets through pfil if they are one of the types pfil can deal
2688  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
2689  * question.) If bifp or ifp is NULL, packet filtering is skipped for
2690  * that interface.
2691  */
2692 static int
2693 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
2694 {
2695 	int snap, error, i, hlen;
2696 	struct ether_header *eh1, eh2;
2697 	struct ip_fw_args args;
2698 	struct ip *ip;
2699 	struct llc llc1;
2700 	u_int16_t ether_type;
2701 
2702 	snap = 0;
2703 	error = -1;	/* Default to an error unless explicitly cleared below */
2704 
2705 	/* We may return with the IP fields swapped; ensure it is not shared. */
2706 	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
2707 
2708 	if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
2709 		return (0); /* filtering is disabled */
2710 
2711 	i = min((*mp)->m_pkthdr.len, max_protohdr);
2712 	if ((*mp)->m_len < i) {
2713 	    *mp = m_pullup(*mp, i);
2714 	    if (*mp == NULL) {
2715 		printf("%s: m_pullup failed\n", __func__);
2716 		return (-1);
2717 	    }
2718 	}
2719 
2720 	eh1 = mtod(*mp, struct ether_header *);
2721 	ether_type = ntohs(eh1->ether_type);
2722 
2723 	/*
2724 	 * Check for SNAP/LLC.
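	 *
	 * A type field smaller than ETHERMTU is really an 802.3 length,
	 * so look for an 802.2 LLC/SNAP header and, if one is present,
	 * recover the encapsulated Ethernet type from it.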
2725 	 */
2726 	if (ether_type < ETHERMTU) {
2727 		struct llc *llc2 = (struct llc *)(eh1 + 1);
2728 
2729 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
2730 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
2731 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
2732 		    llc2->llc_control == LLC_UI) {
2733 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
2734 			snap = 1;
2735 		}
2736 	}
2737 
2738 	/*
2739 	 * If we're trying to filter bridge traffic, don't look at anything
2740 	 * other than IP and ARP traffic.  If the filter doesn't understand
2741 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
2742 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
2743 	 * but of course we don't have an AppleTalk filter to begin with.
2744 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
2745 	 * ARP traffic.)
2746 	 */
2747 	switch (ether_type) {
2748 		case ETHERTYPE_ARP:
2749 		case ETHERTYPE_REVARP:
2750 			if (pfil_ipfw_arp == 0)
2751 				return (0); /* Automatically pass */
2752 			break;
2753 
2754 		case ETHERTYPE_IP:
2755 #ifdef INET6
2756 		case ETHERTYPE_IPV6:
2757 #endif /* INET6 */
2758 			break;
2759 		default:
2760 			/*
2761 			 * Check to see if the user wants to pass non-IP
2762 			 * packets; these are not checked by pfil(9) and would
2763 			 * be passed unconditionally, so the default is to drop.
2764 			 */
2765 			if (pfil_onlyip)
2766 				goto bad;
2767 	}
2768 
2769 	/* Strip off the Ethernet header and keep a copy. */
2770 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
2771 	m_adj(*mp, ETHER_HDR_LEN);
2772 
2773 	/* Strip off snap header, if present */
2774 	if (snap) {
2775 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
2776 		m_adj(*mp, sizeof(struct llc));
2777 	}
2778 
2779 	/*
2780 	 * Check the IP header for alignment and errors
2781 	 */
2782 	if (dir == PFIL_IN) {
2783 		switch (ether_type) {
2784 			case ETHERTYPE_IP:
2785 				error = bridge_ip_checkbasic(mp);
2786 				break;
2787 #ifdef INET6
2788 			case ETHERTYPE_IPV6:
2789 				error = bridge_ip6_checkbasic(mp);
2790 				break;
2791 #endif /* INET6 */
2792 			default:
2793 				error = 0;
2794 		}
2795 		if (error)
2796 			goto bad;
2797 	}
2798 
2799 	if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
2800 		error = -1;
2801 		args.rule = ip_dn_claim_rule(*mp);
2802 		if (args.rule != NULL && fw_one_pass)
2803 			goto ipfwpass; /* packet already partially processed */
2804 
2805 		args.m = *mp;
2806 		args.oif = ifp;
2807 		args.next_hop = NULL;
2808 		args.eh = &eh2;
2809 		args.inp = NULL;	/* used by ipfw uid/gid/jail rules */
2810 		i = ip_fw_chk_ptr(&args);
2811 		*mp = args.m;
2812 
2813 		if (*mp == NULL)
2814 			return (error);
2815 
2816 		if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
2817 
2818 			/* put the Ethernet header back on */
2819 			M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2820 			if (*mp == NULL)
2821 				return (error);
2822 			bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
2823 
2824 			/*
2825 			 * Pass the pkt to dummynet, which consumes it. The
2826 			 * packet will return to us via bridge_dummynet().
2827 			 */
2828 			args.oif = ifp;
2829 			ip_dn_io_ptr(*mp, DN_TO_IFB_FWD, &args);
2830 			return (error);
2831 		}
2832 
2833 		if (i != IP_FW_PASS) /* drop */
2834 			goto bad;
2835 	}
2836 
2837 ipfwpass:
2838 	error = 0;
2839 
2840 	/*
2841 	 * Run the packet through pfil
2842 	 */
2843 	switch (ether_type) {
2844 	case ETHERTYPE_IP:
2845 		/*
2846 		 * Before calling the firewall, swap fields the same way
2847 		 * the IP input path does; we assume the header is contiguous.
2848 		 */
2849 		ip = mtod(*mp, struct ip *);
2850 
2851 		ip->ip_len = ntohs(ip->ip_len);
2852 		ip->ip_off = ntohs(ip->ip_off);
2853 
2854 		/*
2855 		 * Run pfil on the member interface and the bridge, both can
2856 		 * be skipped by clearing pfil_member or pfil_bridge.
2857 		 *
2858 		 * Keep the order:
2859 		 *   in_if -> bridge_if -> out_if
2860 		 */
2861 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
2862 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
2863 					dir, NULL);
2864 
2865 		if (*mp == NULL || error != 0) /* filter may consume */
2866 			break;
2867 
2868 		if (pfil_member && ifp != NULL)
2869 			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
2870 					dir, NULL);
2871 
2872 		if (*mp == NULL || error != 0) /* filter may consume */
2873 			break;
2874 
2875 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
2876 			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
2877 					dir, NULL);
2878 
2879 		if (*mp == NULL || error != 0) /* filter may consume */
2880 			break;
2881 
2882 		/* check if we need to fragment the packet */
2883 		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
2884 			i = (*mp)->m_pkthdr.len;
2885 			if (i > ifp->if_mtu) {
2886 				error = bridge_fragment(ifp, *mp, &eh2, snap,
2887 					    &llc1);
2888 				return (error);
2889 			}
2890 		}
2891 
2892 		/* Recalculate the ip checksum and restore byte ordering */
2893 		ip = mtod(*mp, struct ip *);
2894 		hlen = ip->ip_hl << 2;
2895 		if (hlen < sizeof(struct ip))
2896 			goto bad;
2897 		if (hlen > (*mp)->m_len) {
2898 			if ((*mp = m_pullup(*mp, hlen)) == 0)
2899 				goto bad;
2900 			ip = mtod(*mp, struct ip *);
2901 			if (ip == NULL)
2902 				goto bad;
2903 		}
2904 		ip->ip_len = htons(ip->ip_len);
2905 		ip->ip_off = htons(ip->ip_off);
2906 		ip->ip_sum = 0;
2907 		if (hlen == sizeof(struct ip))
2908 			ip->ip_sum = in_cksum_hdr(ip);
2909 		else
2910 			ip->ip_sum = in_cksum(*mp, hlen);
2911 
2912 		break;
2913 #ifdef INET6
2914 	case ETHERTYPE_IPV6:
2915 		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
2916 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
2917 					dir, NULL);
2918 
2919 		if (*mp == NULL || error != 0) /* filter may consume */
2920 			break;
2921 
2922 		if (pfil_member && ifp != NULL)
2923 			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
2924 					dir, NULL);
2925 
2926 		if (*mp == NULL || error != 0) /* filter may consume */
2927 			break;
2928 
2929 		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
2930 			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
2931 					dir, NULL);
2932 		break;
2933 #endif
2934 	default:
2935 		error = 0;
2936 		break;
2937 	}
2938 
2939 	if (*mp == NULL)
2940 		return (error);
2941 	if (error != 0)
2942 		goto bad;
2943 
2944 	error = -1;
2945 
2946 	/*
2947 	 * Finally, put everything back the way it was and return
2948 	 */
2949 	if (snap) {
2950 		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
2951 		if (*mp == NULL)
2952 			return (error);
2953 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
2954 	}
2955 
2956 	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
2957 	if (*mp == NULL)
2958 		return (error);
2959 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
2960 
2961 	return (0);
2962 
2963 bad:
2964 	m_freem(*mp);
2965 	*mp = NULL;
2966 	return (error);
2967 }
2968 
2969 /*
2970  * Perform basic checks on header size since
2971  * pfil assumes ip_input has already processed
2972  * it for it.  Cut-and-pasted from ip_input.c.
2973  * Given how simple the IPv6 version is,
2974  * does the IPv4 version really need to be
2975  * this complicated?
2976  *
2977  * XXX Should we update ipstat here, or not?
2978  * XXX Right now we update ipstat but not
2979  * XXX csum_counter.
2980  */
2981 static int
2982 bridge_ip_checkbasic(struct mbuf **mp)
2983 {
2984 	struct mbuf *m = *mp;
2985 	struct ip *ip;
2986 	int len, hlen;
2987 	u_short sum;
2988 
2989 	if (*mp == NULL)
2990 		return (-1);
2991 
2992 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
2993 		if ((m = m_copyup(m, sizeof(struct ip),
2994 			(max_linkhdr + 3) & ~3)) == NULL) {
2995 			/* XXXJRT new stat, please */
2996 			ipstat.ips_toosmall++;
2997 			goto bad;
2998 		}
2999 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3000 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3001 			ipstat.ips_toosmall++;
3002 			goto bad;
3003 		}
3004 	}
3005 	ip = mtod(m, struct ip *);
3006 	if (ip == NULL) goto bad;
3007 
3008 	if (ip->ip_v != IPVERSION) {
3009 		ipstat.ips_badvers++;
3010 		goto bad;
3011 	}
3012 	hlen = ip->ip_hl << 2;
3013 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3014 		ipstat.ips_badhlen++;
3015 		goto bad;
3016 	}
3017 	if (hlen > m->m_len) {
3018 		if ((m = m_pullup(m, hlen)) == 0) {
3019 			ipstat.ips_badhlen++;
3020 			goto bad;
3021 		}
3022 		ip = mtod(m, struct ip *);
3023 		if (ip == NULL) goto bad;
3024 	}
3025 
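	/*
	 * Use the result of a hardware-verified header checksum when one
	 * is available; otherwise verify the checksum in software.
	 */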
3026 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3027 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3028 	} else {
3029 		if (hlen == sizeof(struct ip)) {
3030 			sum = in_cksum_hdr(ip);
3031 		} else {
3032 			sum = in_cksum(m, hlen);
3033 		}
3034 	}
3035 	if (sum) {
3036 		ipstat.ips_badsum++;
3037 		goto bad;
3038 	}
3039 
3040 	/* Retrieve the packet length. */
3041 	len = ntohs(ip->ip_len);
3042 
3043 	/*
3044 	 * Check for additional length bogosity
3045 	 */
3046 	if (len < hlen) {
3047 		ipstat.ips_badlen++;
3048 		goto bad;
3049 	}
3050 
3051 	/*
3052 	 * Check that the amount of data in the buffers
3053 	 * is at least as much as the IP header would have us expect.
3054 	 * Drop packet if shorter than we expect.
3055 	 */
3056 	if (m->m_pkthdr.len < len) {
3057 		ipstat.ips_tooshort++;
3058 		goto bad;
3059 	}
3060 
3061 	/* Checks out, proceed */
3062 	*mp = m;
3063 	return (0);
3064 
3065 bad:
3066 	*mp = m;
3067 	return (-1);
3068 }
3069 
3070 #ifdef INET6
3071 /*
3072  * Same as above, but for IPv6.
3073  * Cut-and-pasted from ip6_input.c.
3074  * XXX Should we update ip6stat, or not?
3075  */
3076 static int
3077 bridge_ip6_checkbasic(struct mbuf **mp)
3078 {
3079 	struct mbuf *m = *mp;
3080 	struct ip6_hdr *ip6;
3081 
3082 	/*
3083 	 * If the IPv6 header is not aligned, slurp it up into a new
3084 	 * mbuf with space for link headers, in the event we forward
3085 	 * it.  Otherwise, if it is aligned, make sure the entire base
3086 	 * IPv6 header is in the first mbuf of the chain.
3087 	 */
3088 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3089 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3090 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3091 			    (max_linkhdr + 3) & ~3)) == NULL) {
3092 			/* XXXJRT new stat, please */
3093 			ip6stat.ip6s_toosmall++;
3094 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3095 			goto bad;
3096 		}
3097 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3098 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3099 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3100 			ip6stat.ip6s_toosmall++;
3101 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3102 			goto bad;
3103 		}
3104 	}
3105 
3106 	ip6 = mtod(m, struct ip6_hdr *);
3107 
3108 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3109 		ip6stat.ip6s_badvers++;
3110 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3111 		goto bad;
3112 	}
3113 
3114 	/* Checks out, proceed */
3115 	*mp = m;
3116 	return (0);
3117 
3118 bad:
3119 	*mp = m;
3120 	return (-1);
3121 }
3122 #endif /* INET6 */
3123 
3124 /*
3125  * bridge_fragment:
3126  *
3127  *	Return a fragmented mbuf chain.
3128  */
3129 static int
3130 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
3131     int snap, struct llc *llc)
3132 {
3133 	struct mbuf *m0;
3134 	struct ip *ip;
3135 	int error = -1;
3136 
3137 	if (m->m_len < sizeof(struct ip) &&
3138 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3139 		goto out;
3140 	ip = mtod(m, struct ip *);
3141 
3142 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
3143 		    CSUM_DELAY_IP);
3144 	if (error)
3145 		goto out;
3146 
3147 	/* walk the chain and re-add the Ethernet header */
3148 	for (m0 = m; m0; m0 = m0->m_nextpkt) {
3149 		if (error == 0) {
3150 			if (snap) {
3151 				M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT);
3152 				if (m0 == NULL) {
3153 					error = ENOBUFS;
3154 					continue;
3155 				}
3156 				bcopy(llc, mtod(m0, caddr_t),
3157 				    sizeof(struct llc));
3158 			}
3159 			M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT);
3160 			if (m0 == NULL) {
3161 				error = ENOBUFS;
3162 				continue;
3163 			}
3164 			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
3165 		} else
3166 			m_freem(m);
3167 	}
3168 
3169 	if (error == 0)
3170 		ipstat.ips_fragmented++;
3171 
3172 	return (error);
3173 
3174 out:
3175 	if (m != NULL)
3176 		m_freem(m);
3177 	return (error);
3178 }
3179