xref: /freebsd/sys/net/if_bridge.c (revision d8e36cd2b10f78470c1de56337f685c10ce26ed2)
1 /*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * Copyright 2001 Wasabi Systems, Inc.
7  * All rights reserved.
8  *
9  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed for the NetBSD Project by
22  *	Wasabi Systems, Inc.
23  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
24  *    or promote products derived from this software without specific prior
25  *    written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
42  * All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
55  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
56  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
57  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
61  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
62  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63  * POSSIBILITY OF SUCH DAMAGE.
64  *
65  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
66  */
67 
68 /*
69  * Network interface bridge support.
70  *
71  * TODO:
72  *
73  *	- Currently only supports Ethernet-like interfaces (Ethernet,
74  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
75  *	  to bridge other types of interfaces (maybe consider
76  *	  heterogeneous bridges).
77  */
78 
79 #include <sys/cdefs.h>
80 __FBSDID("$FreeBSD$");
81 
82 #include "opt_inet.h"
83 #include "opt_inet6.h"
84 
85 #include <sys/param.h>
86 #include <sys/eventhandler.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/protosw.h>
90 #include <sys/systm.h>
91 #include <sys/jail.h>
92 #include <sys/time.h>
93 #include <sys/socket.h> /* for net/if.h */
94 #include <sys/sockio.h>
95 #include <sys/ctype.h>  /* string functions */
96 #include <sys/kernel.h>
97 #include <sys/random.h>
98 #include <sys/syslog.h>
99 #include <sys/sysctl.h>
100 #include <vm/uma.h>
101 #include <sys/module.h>
102 #include <sys/priv.h>
103 #include <sys/proc.h>
104 #include <sys/lock.h>
105 #include <sys/mutex.h>
106 
107 #include <net/bpf.h>
108 #include <net/if.h>
109 #include <net/if_clone.h>
110 #include <net/if_dl.h>
111 #include <net/if_types.h>
112 #include <net/if_var.h>
113 #include <net/if_private.h>
114 #include <net/pfil.h>
115 #include <net/vnet.h>
116 
117 #include <netinet/in.h>
118 #include <netinet/in_systm.h>
119 #include <netinet/in_var.h>
120 #include <netinet/ip.h>
121 #include <netinet/ip_var.h>
122 #ifdef INET6
123 #include <netinet/ip6.h>
124 #include <netinet6/ip6_var.h>
125 #include <netinet6/in6_ifattach.h>
126 #endif
127 #if defined(INET) || defined(INET6)
128 #include <netinet/ip_carp.h>
129 #endif
130 #include <machine/in_cksum.h>
131 #include <netinet/if_ether.h>
132 #include <net/bridgestp.h>
133 #include <net/if_bridgevar.h>
134 #include <net/if_llc.h>
135 #include <net/if_vlan_var.h>
136 
137 #include <net/route.h>
138 
139 /*
140  * Size of the route hash table.  Must be a power of two.
141  */
142 #ifndef BRIDGE_RTHASH_SIZE
143 #define	BRIDGE_RTHASH_SIZE		1024
144 #endif
145 
146 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
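
/*
 * Because the table size is a power of two, a hash bucket can be selected
 * with a cheap bitwise AND, e.g. bucket = hash & BRIDGE_RTHASH_MASK,
 * instead of a modulo operation.
 */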
147 
148 /*
149  * Default maximum number of addresses to cache.
150  */
151 #ifndef BRIDGE_RTABLE_MAX
152 #define	BRIDGE_RTABLE_MAX		2000
153 #endif
154 
155 /*
156  * Timeout (in seconds) for entries learned dynamically.
157  */
158 #ifndef BRIDGE_RTABLE_TIMEOUT
159 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
160 #endif
161 
162 /*
163  * Number of seconds between walks of the route list.
164  */
165 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
166 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
167 #endif
168 
169 /*
170  * List of capabilities to possibly mask on the member interface.
171  */
172 #define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
173 					 IFCAP_TXCSUM_IPV6)
174 
175 /*
176  * List of capabilities to strip
177  */
178 #define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO
179 
180 /*
181  * Bridge locking
182  *
183  * The bridge relies heavily on the epoch(9) system to protect its data
184  * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
185  * must ensure there is only one writer at a time.
186  *
187  * That is: for read accesses we only need to be in NET_EPOCH, but for write
188  * accesses we must hold:
189  *
190  *  - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
191  *  - BRIDGE_LOCK, for any other change
192  *
193  * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
194  * calls to bridge member interfaces and these ioctl()s can sleep.
195  * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
196  * required while we're in NET_EPOCH and then we're not allowed to sleep.
197  */
198 #define BRIDGE_LOCK_INIT(_sc)		do {			\
199 	sx_init(&(_sc)->sc_sx, "if_bridge");			\
200 	mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF);	\
201 } while (0)
202 #define BRIDGE_LOCK_DESTROY(_sc)	do {	\
203 	sx_destroy(&(_sc)->sc_sx);		\
204 	mtx_destroy(&(_sc)->sc_rt_mtx);		\
205 } while (0)
206 #define BRIDGE_LOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
207 #define BRIDGE_UNLOCK(_sc)		sx_xunlock(&(_sc)->sc_sx)
208 #define BRIDGE_LOCK_ASSERT(_sc)		sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
209 #define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
210 	    MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
211 #define BRIDGE_UNLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
212 #define BRIDGE_RT_LOCK(_sc)		mtx_lock(&(_sc)->sc_rt_mtx)
213 #define BRIDGE_RT_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_rt_mtx)
214 #define BRIDGE_RT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
215 #define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
216 	    MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
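
/*
 * Illustrative sketch (not part of the driver): under the rules above a
 * pure reader of sc_iflist or the forwarding table only needs to be inside
 * the network epoch, while a writer must take the matching lock, e.g.:
 *
 *	struct epoch_tracker et;
 *
 *	NET_EPOCH_ENTER(et);
 *	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
 *		...		read-only traversal of the member list
 *	NET_EPOCH_EXIT(et);
 *
 *	BRIDGE_RT_LOCK(sc);
 *	...			add or remove bridge_rtnode entries
 *	BRIDGE_RT_UNLOCK(sc);
 */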
217 
218 /*
219  * Bridge interface list entry.
220  */
221 struct bridge_iflist {
222 	CK_LIST_ENTRY(bridge_iflist) bif_next;
223 	struct ifnet		*bif_ifp;	/* member if */
224 	struct bstp_port	bif_stp;	/* STP state */
225 	uint32_t		bif_flags;	/* member if flags */
226 	int			bif_savedcaps;	/* saved capabilities */
227 	uint32_t		bif_addrmax;	/* max # of addresses */
228 	uint32_t		bif_addrcnt;	/* cur. # of addresses */
229 	uint32_t		bif_addrexceeded;/* # of address violations */
230 	struct epoch_context	bif_epoch_ctx;
231 };
232 
233 /*
234  * Bridge route node.
235  */
236 struct bridge_rtnode {
237 	CK_LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
238 	CK_LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
239 	struct bridge_iflist	*brt_dst;	/* destination if */
240 	unsigned long		brt_expire;	/* expiration time */
241 	uint8_t			brt_flags;	/* address flags */
242 	uint8_t			brt_addr[ETHER_ADDR_LEN];
243 	uint16_t		brt_vlan;	/* vlan id */
244 	struct	vnet		*brt_vnet;
245 	struct	epoch_context	brt_epoch_ctx;
246 };
247 #define	brt_ifp			brt_dst->bif_ifp
248 
249 /*
250  * Software state for each bridge.
251  */
252 struct bridge_softc {
253 	struct ifnet		*sc_ifp;	/* make this an interface */
254 	LIST_ENTRY(bridge_softc) sc_list;
255 	struct sx		sc_sx;
256 	struct mtx		sc_rt_mtx;
257 	uint32_t		sc_brtmax;	/* max # of addresses */
258 	uint32_t		sc_brtcnt;	/* cur. # of addresses */
259 	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
260 	struct callout		sc_brcallout;	/* bridge callout */
261 	CK_LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
262 	CK_LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
263 	CK_LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
264 	uint32_t		sc_rthash_key;	/* key for hash */
265 	CK_LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
266 	struct bstp_state	sc_stp;		/* STP state */
267 	uint32_t		sc_brtexceeded;	/* # of cache drops */
268 	struct ifnet		*sc_ifaddr;	/* member mac copied from */
269 	struct ether_addr	sc_defaddr;	/* Default MAC address */
270 	if_input_fn_t		sc_if_input;	/* Saved copy of if_input */
271 	struct epoch_context	sc_epoch_ctx;
272 };
273 
274 VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
275 #define	V_bridge_list_sx	VNET(bridge_list_sx)
276 static eventhandler_tag bridge_detach_cookie;
277 
278 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
279 
280 VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
281 #define	V_bridge_rtnode_zone	VNET(bridge_rtnode_zone)
282 
283 static int	bridge_clone_create(struct if_clone *, char *, size_t,
284 		    struct ifc_data *, struct ifnet **);
285 static int	bridge_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
286 
287 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
288 static void	bridge_mutecaps(struct bridge_softc *);
289 static void	bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
290 		    int);
291 static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
292 static void	bridge_init(void *);
293 static void	bridge_dummynet(struct mbuf *, struct ifnet *);
294 static void	bridge_stop(struct ifnet *, int);
295 static int	bridge_transmit(struct ifnet *, struct mbuf *);
296 #ifdef ALTQ
297 static void	bridge_altq_start(if_t);
298 static int	bridge_altq_transmit(if_t, struct mbuf *);
299 #endif
300 static void	bridge_qflush(struct ifnet *);
301 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
302 static void	bridge_inject(struct ifnet *, struct mbuf *);
303 static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
304 		    struct rtentry *);
305 static int	bridge_enqueue(struct bridge_softc *, struct ifnet *,
306 		    struct mbuf *);
307 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
308 
309 static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
310 		    struct mbuf *m);
311 
312 static void	bridge_timer(void *);
313 
314 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
315 		    struct mbuf *, int);
316 static void	bridge_span(struct bridge_softc *, struct mbuf *);
317 
318 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
319 		    uint16_t, struct bridge_iflist *, int, uint8_t);
320 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
321 		    uint16_t);
322 static void	bridge_rttrim(struct bridge_softc *);
323 static void	bridge_rtage(struct bridge_softc *);
324 static void	bridge_rtflush(struct bridge_softc *, int);
325 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
326 		    uint16_t);
327 
328 static void	bridge_rtable_init(struct bridge_softc *);
329 static void	bridge_rtable_fini(struct bridge_softc *);
330 
331 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
332 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
333 		    const uint8_t *, uint16_t);
334 static int	bridge_rtnode_insert(struct bridge_softc *,
335 		    struct bridge_rtnode *);
336 static void	bridge_rtnode_destroy(struct bridge_softc *,
337 		    struct bridge_rtnode *);
338 static void	bridge_rtable_expire(struct ifnet *, int);
339 static void	bridge_state_change(struct ifnet *, int);
340 
341 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
342 		    const char *name);
343 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
344 		    struct ifnet *ifp);
345 static void	bridge_delete_member(struct bridge_softc *,
346 		    struct bridge_iflist *, int);
347 static void	bridge_delete_span(struct bridge_softc *,
348 		    struct bridge_iflist *);
349 
350 static int	bridge_ioctl_add(struct bridge_softc *, void *);
351 static int	bridge_ioctl_del(struct bridge_softc *, void *);
352 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
353 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
354 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
355 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
356 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
357 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
358 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
359 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
360 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
361 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
362 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
363 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
364 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
365 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
366 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
367 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
368 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
369 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
370 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
371 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
372 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
373 static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
374 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
375 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
376 static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
377 static int	bridge_ioctl_grte(struct bridge_softc *, void *);
378 static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
379 static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
380 static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
381 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
382 		    int);
383 static int	bridge_ip_checkbasic(struct mbuf **mp);
384 #ifdef INET6
385 static int	bridge_ip6_checkbasic(struct mbuf **mp);
386 #endif /* INET6 */
387 static int	bridge_fragment(struct ifnet *, struct mbuf **mp,
388 		    struct ether_header *, int, struct llc *);
389 static void	bridge_linkstate(struct ifnet *ifp);
390 static void	bridge_linkcheck(struct bridge_softc *sc);
391 
392 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
393 #define	VLANTAGOF(_m)	\
394     (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1
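
/*
 * Informal example: an untagged frame has M_VLANTAG clear, so VLANTAGOF()
 * yields the default VLAN, 1; a frame received with an 802.1Q VLAN ID of 20
 * in its mbuf tag yields 20.
 */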
395 
396 static struct bstp_cb_ops bridge_ops = {
397 	.bcb_state = bridge_state_change,
398 	.bcb_rtage = bridge_rtable_expire
399 };
400 
401 SYSCTL_DECL(_net_link);
402 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
403     "Bridge");
404 
405 /* only pass IP[46] packets when pfil is enabled */
406 VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
407 #define	V_pfil_onlyip	VNET(pfil_onlyip)
408 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
409     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
410     "Only pass IP packets when pfil is enabled");
411 
412 /* run pfil hooks on the bridge interface */
413 VNET_DEFINE_STATIC(int, pfil_bridge) = 0;
414 #define	V_pfil_bridge	VNET(pfil_bridge)
415 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
416     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
417     "Packet filter on the bridge interface");
418 
419 /* layer2 filter with ipfw */
420 VNET_DEFINE_STATIC(int, pfil_ipfw);
421 #define	V_pfil_ipfw	VNET(pfil_ipfw)
422 
423 /* layer2 ARP filter with ipfw */
424 VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
425 #define	V_pfil_ipfw_arp	VNET(pfil_ipfw_arp)
426 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
427     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
428     "Filter ARP packets through IPFW layer2");
429 
430 /* run pfil hooks on the member interface */
431 VNET_DEFINE_STATIC(int, pfil_member) = 0;
432 #define	V_pfil_member	VNET(pfil_member)
433 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
434     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
435     "Packet filter on the member interface");
436 
437 /* run pfil hooks on the physical interface for locally destined packets */
438 VNET_DEFINE_STATIC(int, pfil_local_phys);
439 #define	V_pfil_local_phys	VNET(pfil_local_phys)
440 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
441     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
442     "Packet filter on the physical interface for locally destined packets");
443 
444 /* log STP state changes */
445 VNET_DEFINE_STATIC(int, log_stp);
446 #define	V_log_stp	VNET(log_stp)
447 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
448     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
449     "Log STP state changes");
450 
451 /* share MAC with first bridge member */
452 VNET_DEFINE_STATIC(int, bridge_inherit_mac);
453 #define	V_bridge_inherit_mac	VNET(bridge_inherit_mac)
454 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
455     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
456     "Inherit MAC address from the first bridge member");
457 
458 VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
459 #define	V_allow_llz_overlap	VNET(allow_llz_overlap)
460 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
461     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
462     "Allow overlap of link-local scope "
463     "zones of a bridge interface and the member interfaces");
464 
465 /* log MAC address port flapping */
466 VNET_DEFINE_STATIC(bool, log_mac_flap) = true;
467 #define	V_log_mac_flap	VNET(log_mac_flap)
468 SYSCTL_BOOL(_net_link_bridge, OID_AUTO, log_mac_flap,
469     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(log_mac_flap), true,
470     "Log MAC address port flapping");
471 
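/* State used to rate-limit the MAC address flapping log messages above. */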
472 VNET_DEFINE_STATIC(int, log_interval) = 5;
473 VNET_DEFINE_STATIC(int, log_count) = 0;
474 VNET_DEFINE_STATIC(struct timeval, log_last) = { 0 };
475 
476 #define	V_log_interval	VNET(log_interval)
477 #define	V_log_count	VNET(log_count)
478 #define	V_log_last	VNET(log_last)
479 
480 struct bridge_control {
481 	int	(*bc_func)(struct bridge_softc *, void *);
482 	int	bc_argsize;
483 	int	bc_flags;
484 };
485 
486 #define	BC_F_COPYIN		0x01	/* copy arguments in */
487 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
488 #define	BC_F_SUSER		0x04	/* do super-user check */
489 
490 static const struct bridge_control bridge_control_table[] = {
491 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
492 	  BC_F_COPYIN|BC_F_SUSER },
493 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
494 	  BC_F_COPYIN|BC_F_SUSER },
495 
496 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
497 	  BC_F_COPYIN|BC_F_COPYOUT },
498 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
499 	  BC_F_COPYIN|BC_F_SUSER },
500 
501 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
502 	  BC_F_COPYIN|BC_F_SUSER },
503 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
504 	  BC_F_COPYOUT },
505 
506 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
507 	  BC_F_COPYIN|BC_F_COPYOUT },
508 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
509 	  BC_F_COPYIN|BC_F_COPYOUT },
510 
511 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
512 	  BC_F_COPYIN|BC_F_SUSER },
513 
514 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
515 	  BC_F_COPYIN|BC_F_SUSER },
516 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
517 	  BC_F_COPYOUT },
518 
519 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
520 	  BC_F_COPYIN|BC_F_SUSER },
521 
522 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
523 	  BC_F_COPYIN|BC_F_SUSER },
524 
525 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
526 	  BC_F_COPYOUT },
527 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
528 	  BC_F_COPYIN|BC_F_SUSER },
529 
530 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
531 	  BC_F_COPYOUT },
532 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
533 	  BC_F_COPYIN|BC_F_SUSER },
534 
535 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
536 	  BC_F_COPYOUT },
537 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
538 	  BC_F_COPYIN|BC_F_SUSER },
539 
540 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
541 	  BC_F_COPYOUT },
542 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
543 	  BC_F_COPYIN|BC_F_SUSER },
544 
545 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
546 	  BC_F_COPYIN|BC_F_SUSER },
547 
548 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
549 	  BC_F_COPYIN|BC_F_SUSER },
550 
551 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
552 	  BC_F_COPYIN|BC_F_SUSER },
553 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
554 	  BC_F_COPYIN|BC_F_SUSER },
555 
556 	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
557 	  BC_F_COPYOUT },
558 
559 	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
560 	  BC_F_COPYOUT },
561 
562 	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
563 	  BC_F_COPYIN|BC_F_COPYOUT },
564 
565 	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
566 	  BC_F_COPYIN|BC_F_SUSER },
567 
568 	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
569 	  BC_F_COPYIN|BC_F_SUSER },
570 
571 	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
572 	  BC_F_COPYIN|BC_F_SUSER },
573 
574 };
575 static const int bridge_control_table_size = nitems(bridge_control_table);
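
/*
 * Sketch of how userland typically reaches this table (roughly what
 * ifconfig(8) does for its bridge commands); the ifd_cmd value is the
 * index of the desired entry, e.g. BRDGADD from net/if_bridgevar.h for
 * bridge_ioctl_add:
 *
 *	struct ifbreq req = { 0 };
 *	struct ifdrv ifd = { 0 };
 *
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);	(s is any datagram socket)
 */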
576 
577 VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list);
578 #define	V_bridge_list	VNET(bridge_list)
579 #define	BRIDGE_LIST_LOCK_INIT(x)	sx_init(&V_bridge_list_sx,	\
580 					    "if_bridge list")
581 #define	BRIDGE_LIST_LOCK_DESTROY(x)	sx_destroy(&V_bridge_list_sx)
582 #define	BRIDGE_LIST_LOCK(x)		sx_xlock(&V_bridge_list_sx)
583 #define	BRIDGE_LIST_UNLOCK(x)		sx_xunlock(&V_bridge_list_sx)
584 
585 VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
586 #define	V_bridge_cloner	VNET(bridge_cloner)
587 
588 static const char bridge_name[] = "bridge";
589 
590 static void
591 vnet_bridge_init(const void *unused __unused)
592 {
593 
594 	V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
595 	    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
596 	    UMA_ALIGN_PTR, 0);
597 	BRIDGE_LIST_LOCK_INIT();
598 	LIST_INIT(&V_bridge_list);
599 
600 	struct if_clone_addreq req = {
601 		.create_f = bridge_clone_create,
602 		.destroy_f = bridge_clone_destroy,
603 		.flags = IFC_F_AUTOUNIT,
604 	};
605 	V_bridge_cloner = ifc_attach_cloner(bridge_name, &req);
606 }
607 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
608     vnet_bridge_init, NULL);
609 
610 static void
611 vnet_bridge_uninit(const void *unused __unused)
612 {
613 
614 	ifc_detach_cloner(V_bridge_cloner);
615 	V_bridge_cloner = NULL;
616 	BRIDGE_LIST_LOCK_DESTROY();
617 
618 	/* Callbacks may use the UMA zone. */
619 	NET_EPOCH_DRAIN_CALLBACKS();
620 
621 	uma_zdestroy(V_bridge_rtnode_zone);
622 }
623 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
624     vnet_bridge_uninit, NULL);
625 
626 static int
627 bridge_modevent(module_t mod, int type, void *data)
628 {
629 
630 	switch (type) {
631 	case MOD_LOAD:
632 		bridge_dn_p = bridge_dummynet;
633 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
634 		    ifnet_departure_event, bridge_ifdetach, NULL,
635 		    EVENTHANDLER_PRI_ANY);
636 		break;
637 	case MOD_UNLOAD:
638 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
639 		    bridge_detach_cookie);
640 		bridge_dn_p = NULL;
641 		break;
642 	default:
643 		return (EOPNOTSUPP);
644 	}
645 	return (0);
646 }
647 
648 static moduledata_t bridge_mod = {
649 	"if_bridge",
650 	bridge_modevent,
651 	0
652 };
653 
654 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
655 MODULE_VERSION(if_bridge, 1);
656 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
657 
658 /*
659  * handler for net.link.bridge.ipfw
660  */
661 static int
662 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
663 {
664 	int enable = V_pfil_ipfw;
665 	int error;
666 
667 	error = sysctl_handle_int(oidp, &enable, 0, req);
668 	enable &= 1;
669 
670 	if (enable != V_pfil_ipfw) {
671 		V_pfil_ipfw = enable;
672 
673 		/*
674 		 * Disable pfil so that ipfw doesn't run twice; if the user
675 		 * really wants both, they can re-enable pfil_bridge and/or
676 		 * pfil_member.  Also allow non-IP packets, as ipfw can filter
677 		 * by layer2 type.
678 		 */
679 		if (V_pfil_ipfw) {
680 			V_pfil_onlyip = 0;
681 			V_pfil_bridge = 0;
682 			V_pfil_member = 0;
683 		}
684 	}
685 
686 	return (error);
687 }
688 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
689     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
690     &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
691     "Layer2 filter with IPFW");
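
/*
 * Informal usage note: "sysctl net.link.bridge.ipfw=1" enables layer2 ipfw
 * filtering and, per the handler above, clears pfil_onlyip, pfil_bridge and
 * pfil_member; re-enable those knobs explicitly if both ipfw and pfil
 * filtering are wanted.
 */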
692 
693 #ifdef VIMAGE
694 static void
695 bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
696 {
697 	struct bridge_softc *sc = ifp->if_softc;
698 	struct bridge_iflist *bif;
699 
700 	BRIDGE_LOCK(sc);
701 
702 	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
703 		bridge_delete_member(sc, bif, 0);
704 
705 	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
706 		bridge_delete_span(sc, bif);
707 	}
708 
709 	BRIDGE_UNLOCK(sc);
710 
711 	ether_reassign(ifp, newvnet, arg);
712 }
713 #endif
714 
715 /*
716  * bridge_clone_create:
717  *
718  *	Create a new bridge instance.
719  */
720 static int
721 bridge_clone_create(struct if_clone *ifc, char *name, size_t len,
722     struct ifc_data *ifd, struct ifnet **ifpp)
723 {
724 	struct bridge_softc *sc;
725 	struct ifnet *ifp;
726 
727 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
728 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
729 	if (ifp == NULL) {
730 		free(sc, M_DEVBUF);
731 		return (ENOSPC);
732 	}
733 
734 	BRIDGE_LOCK_INIT(sc);
735 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
736 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
737 
738 	/* Initialize our routing table. */
739 	bridge_rtable_init(sc);
740 
741 	callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
742 
743 	CK_LIST_INIT(&sc->sc_iflist);
744 	CK_LIST_INIT(&sc->sc_spanlist);
745 
746 	ifp->if_softc = sc;
747 	if_initname(ifp, bridge_name, ifd->unit);
748 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
749 	ifp->if_ioctl = bridge_ioctl;
750 #ifdef ALTQ
751 	ifp->if_start = bridge_altq_start;
752 	ifp->if_transmit = bridge_altq_transmit;
753 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
754 	ifp->if_snd.ifq_drv_maxlen = 0;
755 	IFQ_SET_READY(&ifp->if_snd);
756 #else
757 	ifp->if_transmit = bridge_transmit;
758 #endif
759 	ifp->if_qflush = bridge_qflush;
760 	ifp->if_init = bridge_init;
761 	ifp->if_type = IFT_BRIDGE;
762 
763 	ether_gen_addr(ifp, &sc->sc_defaddr);
764 
765 	bstp_attach(&sc->sc_stp, &bridge_ops);
766 	ether_ifattach(ifp, sc->sc_defaddr.octet);
767 	/* Now undo some of the damage... */
768 	ifp->if_baudrate = 0;
769 	ifp->if_type = IFT_BRIDGE;
770 #ifdef VIMAGE
771 	ifp->if_reassign = bridge_reassign;
772 #endif
773 	sc->sc_if_input = ifp->if_input;	/* ether_input */
774 	ifp->if_input = bridge_inject;
775 
776 	/*
777 	 * Allow BRIDGE_INPUT() to pass in packets originating from the bridge
778 	 * itself via bridge_inject().  This is required for netmap but
779 	 * otherwise has no effect.
780 	 */
781 	ifp->if_bridge_input = bridge_input;
782 
783 	BRIDGE_LIST_LOCK();
784 	LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
785 	BRIDGE_LIST_UNLOCK();
786 	*ifpp = ifp;
787 
788 	return (0);
789 }
790 
791 static void
792 bridge_clone_destroy_cb(struct epoch_context *ctx)
793 {
794 	struct bridge_softc *sc;
795 
796 	sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
797 
798 	BRIDGE_LOCK_DESTROY(sc);
799 	free(sc, M_DEVBUF);
800 }
801 
802 /*
803  * bridge_clone_destroy:
804  *
805  *	Destroy a bridge instance.
806  */
807 static int
808 bridge_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
809 {
810 	struct bridge_softc *sc = ifp->if_softc;
811 	struct bridge_iflist *bif;
812 	struct epoch_tracker et;
813 
814 	BRIDGE_LOCK(sc);
815 
816 	bridge_stop(ifp, 1);
817 	ifp->if_flags &= ~IFF_UP;
818 
819 	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
820 		bridge_delete_member(sc, bif, 0);
821 
822 	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
823 		bridge_delete_span(sc, bif);
824 	}
825 
826 	/* Tear down the routing table. */
827 	bridge_rtable_fini(sc);
828 
829 	BRIDGE_UNLOCK(sc);
830 
831 	NET_EPOCH_ENTER(et);
832 
833 	callout_drain(&sc->sc_brcallout);
834 
835 	BRIDGE_LIST_LOCK();
836 	LIST_REMOVE(sc, sc_list);
837 	BRIDGE_LIST_UNLOCK();
838 
839 	bstp_detach(&sc->sc_stp);
840 #ifdef ALTQ
841 	IFQ_PURGE(&ifp->if_snd);
842 #endif
843 	NET_EPOCH_EXIT(et);
844 
845 	ether_ifdetach(ifp);
846 	if_free(ifp);
847 
848 	NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
849 
850 	return (0);
851 }
852 
853 /*
854  * bridge_ioctl:
855  *
856  *	Handle a control request from the operator.
857  */
858 static int
859 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
860 {
861 	struct bridge_softc *sc = ifp->if_softc;
862 	struct ifreq *ifr = (struct ifreq *)data;
863 	struct bridge_iflist *bif;
864 	struct thread *td = curthread;
865 	union {
866 		struct ifbreq ifbreq;
867 		struct ifbifconf ifbifconf;
868 		struct ifbareq ifbareq;
869 		struct ifbaconf ifbaconf;
870 		struct ifbrparam ifbrparam;
871 		struct ifbropreq ifbropreq;
872 	} args;
873 	struct ifdrv *ifd = (struct ifdrv *) data;
874 	const struct bridge_control *bc;
875 	int error = 0, oldmtu;
876 
877 	BRIDGE_LOCK(sc);
878 
879 	switch (cmd) {
880 	case SIOCADDMULTI:
881 	case SIOCDELMULTI:
882 		break;
883 
884 	case SIOCGDRVSPEC:
885 	case SIOCSDRVSPEC:
886 		if (ifd->ifd_cmd >= bridge_control_table_size) {
887 			error = EINVAL;
888 			break;
889 		}
890 		bc = &bridge_control_table[ifd->ifd_cmd];
891 
892 		if (cmd == SIOCGDRVSPEC &&
893 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
894 			error = EINVAL;
895 			break;
896 		}
897 		else if (cmd == SIOCSDRVSPEC &&
898 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
899 			error = EINVAL;
900 			break;
901 		}
902 
903 		if (bc->bc_flags & BC_F_SUSER) {
904 			error = priv_check(td, PRIV_NET_BRIDGE);
905 			if (error)
906 				break;
907 		}
908 
909 		if (ifd->ifd_len != bc->bc_argsize ||
910 		    ifd->ifd_len > sizeof(args)) {
911 			error = EINVAL;
912 			break;
913 		}
914 
915 		bzero(&args, sizeof(args));
916 		if (bc->bc_flags & BC_F_COPYIN) {
917 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
918 			if (error)
919 				break;
920 		}
921 
922 		oldmtu = ifp->if_mtu;
923 		error = (*bc->bc_func)(sc, &args);
924 		if (error)
925 			break;
926 
927 		/*
928 		 * Bridge MTU may change during addition of the first port.
929 		 * If it changed, notify the network layers of the new MTU.
930 		 */
931 		if (ifp->if_mtu != oldmtu)
932 			if_notifymtu(ifp);
933 
934 		if (bc->bc_flags & BC_F_COPYOUT)
935 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
936 
937 		break;
938 
939 	case SIOCSIFFLAGS:
940 		if (!(ifp->if_flags & IFF_UP) &&
941 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
942 			/*
943 			 * If interface is marked down and it is running,
944 			 * then stop and disable it.
945 			 */
946 			bridge_stop(ifp, 1);
947 		} else if ((ifp->if_flags & IFF_UP) &&
948 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
949 			/*
950 			 * If interface is marked up and it is stopped, then
951 			 * start it.
952 			 */
953 			BRIDGE_UNLOCK(sc);
954 			(*ifp->if_init)(sc);
955 			BRIDGE_LOCK(sc);
956 		}
957 		break;
958 
959 	case SIOCSIFMTU:
960 		oldmtu = sc->sc_ifp->if_mtu;
961 
962 		if (ifr->ifr_mtu < 576) {
963 			error = EINVAL;
964 			break;
965 		}
966 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
967 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
968 			break;
969 		}
970 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
971 			error = (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
972 			    SIOCSIFMTU, (caddr_t)ifr);
973 			if (error != 0) {
974 				log(LOG_NOTICE, "%s: invalid MTU: %u for"
975 				    " member %s\n", sc->sc_ifp->if_xname,
976 				    ifr->ifr_mtu,
977 				    bif->bif_ifp->if_xname);
978 				error = EINVAL;
979 				break;
980 			}
981 		}
982 		if (error) {
983 			/* Restore the previous MTU on all member interfaces. */
984 			ifr->ifr_mtu = oldmtu;
985 			CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
986 				(*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
987 				    SIOCSIFMTU, (caddr_t)ifr);
988 			}
989 		} else {
990 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
991 		}
992 		break;
993 	default:
994 		/*
995 		 * drop the lock as ether_ioctl() will call bridge_start() and
996 		 * cause the lock to be recursed.
997 		 */
998 		BRIDGE_UNLOCK(sc);
999 		error = ether_ioctl(ifp, cmd, data);
1000 		BRIDGE_LOCK(sc);
1001 		break;
1002 	}
1003 
1004 	BRIDGE_UNLOCK(sc);
1005 
1006 	return (error);
1007 }
1008 
1009 /*
1010  * bridge_mutecaps:
1011  *
1012  *	Clear or restore unwanted capabilities on the member interface
1013  */
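/*
 * Informal example of the intersection computed below: if one member's
 * saved capabilities include TXCSUM|TSO and another member's only TXCSUM,
 * the mask collapses to TXCSUM and TSO is switched off on every member.
 * Capabilities in BRIDGE_IFCAPS_STRIP (LRO) are always disabled on members.
 */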
1014 static void
1015 bridge_mutecaps(struct bridge_softc *sc)
1016 {
1017 	struct bridge_iflist *bif;
1018 	int enabled, mask;
1019 
1020 	BRIDGE_LOCK_ASSERT(sc);
1021 
1022 	/* Initial bitmask of capabilities to test */
1023 	mask = BRIDGE_IFCAPS_MASK;
1024 
1025 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1026 		/* Every member must support it or it is disabled */
1027 		mask &= bif->bif_savedcaps;
1028 	}
1029 
1030 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1031 		enabled = bif->bif_ifp->if_capenable;
1032 		enabled &= ~BRIDGE_IFCAPS_STRIP;
1033 		/* strip off mask bits and enable them again if allowed */
1034 		enabled &= ~BRIDGE_IFCAPS_MASK;
1035 		enabled |= mask;
1036 		bridge_set_ifcap(sc, bif, enabled);
1037 	}
1038 }
1039 
1040 static void
1041 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1042 {
1043 	struct ifnet *ifp = bif->bif_ifp;
1044 	struct ifreq ifr;
1045 	int error, mask, stuck;
1046 
1047 	bzero(&ifr, sizeof(ifr));
1048 	ifr.ifr_reqcap = set;
1049 
1050 	if (ifp->if_capenable != set) {
1051 		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1052 		if (error)
1053 			if_printf(sc->sc_ifp,
1054 			    "error setting capabilities on %s: %d\n",
1055 			    ifp->if_xname, error);
1056 		mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
1057 		stuck = ifp->if_capenable & mask & ~set;
1058 		if (stuck != 0)
1059 			if_printf(sc->sc_ifp,
1060 			    "can't disable some capabilities on %s: 0x%x\n",
1061 			    ifp->if_xname, stuck);
1062 	}
1063 }
1064 
1065 /*
1066  * bridge_lookup_member:
1067  *
1068  *	Lookup a bridge member interface.
1069  */
1070 static struct bridge_iflist *
1071 bridge_lookup_member(struct bridge_softc *sc, const char *name)
1072 {
1073 	struct bridge_iflist *bif;
1074 	struct ifnet *ifp;
1075 
1076 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1077 
1078 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1079 		ifp = bif->bif_ifp;
1080 		if (strcmp(ifp->if_xname, name) == 0)
1081 			return (bif);
1082 	}
1083 
1084 	return (NULL);
1085 }
1086 
1087 /*
1088  * bridge_lookup_member_if:
1089  *
1090  *	Lookup a bridge member interface by ifnet*.
1091  */
1092 static struct bridge_iflist *
1093 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1094 {
1095 	struct bridge_iflist *bif;
1096 
1097 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1098 
1099 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1100 		if (bif->bif_ifp == member_ifp)
1101 			return (bif);
1102 	}
1103 
1104 	return (NULL);
1105 }
1106 
1107 static void
1108 bridge_delete_member_cb(struct epoch_context *ctx)
1109 {
1110 	struct bridge_iflist *bif;
1111 
1112 	bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1113 
1114 	free(bif, M_DEVBUF);
1115 }
1116 
1117 /*
1118  * bridge_delete_member:
1119  *
1120  *	Delete the specified member interface.
1121  */
1122 static void
1123 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1124     int gone)
1125 {
1126 	struct ifnet *ifs = bif->bif_ifp;
1127 	struct ifnet *fif = NULL;
1128 	struct bridge_iflist *bifl;
1129 
1130 	BRIDGE_LOCK_ASSERT(sc);
1131 
1132 	if (bif->bif_flags & IFBIF_STP)
1133 		bstp_disable(&bif->bif_stp);
1134 
1135 	ifs->if_bridge = NULL;
1136 	CK_LIST_REMOVE(bif, bif_next);
1137 
1138 	/*
1139 	 * If removing the interface that gave the bridge its mac address, set
1140 	 * the mac address of the bridge to the address of the next member, or
1141 	 * to its default address if no members are left.
1142 	 */
1143 	if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1144 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1145 			bcopy(&sc->sc_defaddr,
1146 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1147 			sc->sc_ifaddr = NULL;
1148 		} else {
1149 			bifl = CK_LIST_FIRST(&sc->sc_iflist);
1150 			fif = bifl->bif_ifp;
1151 			bcopy(IF_LLADDR(fif),
1152 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1153 			sc->sc_ifaddr = fif;
1154 		}
1155 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1156 	}
1157 
1158 	bridge_linkcheck(sc);
1159 	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
1160 	BRIDGE_RT_LOCK(sc);
1161 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1162 	BRIDGE_RT_UNLOCK(sc);
1163 	KASSERT(bif->bif_addrcnt == 0,
1164 	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1165 
1166 	ifs->if_bridge_output = NULL;
1167 	ifs->if_bridge_input = NULL;
1168 	ifs->if_bridge_linkstate = NULL;
1169 	if (!gone) {
1170 		switch (ifs->if_type) {
1171 		case IFT_ETHER:
1172 		case IFT_L2VLAN:
1173 			/*
1174 			 * Take the interface out of promiscuous mode, but only
1175 			 * if it was promiscuous in the first place. It might
1176 			 * not be if we're in the bridge_ioctl_add() error path.
1177 			 */
1178 			if (ifs->if_flags & IFF_PROMISC)
1179 				(void) ifpromisc(ifs, 0);
1180 			break;
1181 
1182 		case IFT_GIF:
1183 			break;
1184 
1185 		default:
1186 #ifdef DIAGNOSTIC
1187 			panic("bridge_delete_member: impossible");
1188 #endif
1189 			break;
1190 		}
1191 		/* re-enable any interface capabilities */
1192 		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1193 	}
1194 	bstp_destroy(&bif->bif_stp);	/* prepare to free */
1195 
1196 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1197 }
1198 
1199 /*
1200  * bridge_delete_span:
1201  *
1202  *	Delete the specified span interface.
1203  */
1204 static void
1205 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1206 {
1207 	BRIDGE_LOCK_ASSERT(sc);
1208 
1209 	KASSERT(bif->bif_ifp->if_bridge == NULL,
1210 	    ("%s: not a span interface", __func__));
1211 
1212 	CK_LIST_REMOVE(bif, bif_next);
1213 
1214 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1215 }
1216 
1217 static int
1218 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1219 {
1220 	struct ifbreq *req = arg;
1221 	struct bridge_iflist *bif = NULL;
1222 	struct ifnet *ifs;
1223 	int error = 0;
1224 
1225 	ifs = ifunit(req->ifbr_ifsname);
1226 	if (ifs == NULL)
1227 		return (ENOENT);
1228 	if (ifs->if_ioctl == NULL)	/* must be supported */
1229 		return (EINVAL);
1230 
1231 	/* If it's in the span list, it can't be a member. */
1232 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1233 		if (ifs == bif->bif_ifp)
1234 			return (EBUSY);
1235 
1236 	if (ifs->if_bridge == sc)
1237 		return (EEXIST);
1238 
1239 	if (ifs->if_bridge != NULL)
1240 		return (EBUSY);
1241 
1242 	switch (ifs->if_type) {
1243 	case IFT_ETHER:
1244 	case IFT_L2VLAN:
1245 	case IFT_GIF:
1246 		/* permitted interface types */
1247 		break;
1248 	default:
1249 		return (EINVAL);
1250 	}
1251 
1252 #ifdef INET6
1253 	/*
1254 	 * A valid link-local scope inet6 address must not be configured
1255 	 * on both the parent interface and a member interface at the
1256 	 * same time.  This restriction is needed to prevent violation
1257 	 * of the link-local scope zone.  Attempting to add a member
1258 	 * interface that has inet6 addresses when the parent has inet6
1259 	 * triggers removal of all inet6 addresses on the member
1260 	 * interface.
1261 	 */
1262 
1263 	/* Check if the parent interface has a link-local scope addr. */
1264 	if (V_allow_llz_overlap == 0 &&
1265 	    in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1266 		/*
1267 		 * If any, remove all inet6 addresses from the member
1268 		 * interfaces.
1269 		 */
1270 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1271 			if (in6ifa_llaonifp(bif->bif_ifp)) {
1272 				in6_ifdetach(bif->bif_ifp);
1273 				if_printf(sc->sc_ifp,
1274 				    "IPv6 addresses on %s have been removed "
1275 				    "before adding it as a member to prevent "
1276 				    "IPv6 address scope violation.\n",
1277 				    bif->bif_ifp->if_xname);
1278 			}
1279 		}
1280 		if (in6ifa_llaonifp(ifs)) {
1281 			in6_ifdetach(ifs);
1282 			if_printf(sc->sc_ifp,
1283 			    "IPv6 addresses on %s have been removed "
1284 			    "before adding it as a member to prevent "
1285 			    "IPv6 address scope violation.\n",
1286 			    ifs->if_xname);
1287 		}
1288 	}
1289 #endif
1290 	/* Allow the first Ethernet member to define the MTU */
1291 	if (CK_LIST_EMPTY(&sc->sc_iflist))
1292 		sc->sc_ifp->if_mtu = ifs->if_mtu;
1293 	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1294 		struct ifreq ifr;
1295 
1296 		snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
1297 		    ifs->if_xname);
1298 		ifr.ifr_mtu = sc->sc_ifp->if_mtu;
1299 
1300 		error = (*ifs->if_ioctl)(ifs,
1301 		    SIOCSIFMTU, (caddr_t)&ifr);
1302 		if (error != 0) {
1303 			log(LOG_NOTICE, "%s: invalid MTU: %u for"
1304 			    " new member %s\n", sc->sc_ifp->if_xname,
1305 			    ifr.ifr_mtu,
1306 			    ifs->if_xname);
1307 			return (EINVAL);
1308 		}
1309 	}
1310 
1311 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1312 	if (bif == NULL)
1313 		return (ENOMEM);
1314 
1315 	bif->bif_ifp = ifs;
1316 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1317 	bif->bif_savedcaps = ifs->if_capenable;
1318 
1319 	/*
1320 	 * Assign the interface's MAC address to the bridge if it's the first
1321 	 * member and the MAC address of the bridge has not been changed from
1322 	 * the default randomly generated one.
1323 	 */
1324 	if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1325 	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1326 		bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1327 		sc->sc_ifaddr = ifs;
1328 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1329 	}
1330 
1331 	ifs->if_bridge = sc;
1332 	ifs->if_bridge_output = bridge_output;
1333 	ifs->if_bridge_input = bridge_input;
1334 	ifs->if_bridge_linkstate = bridge_linkstate;
1335 	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1336 	/*
1337 	 * XXX: XLOCK HERE!?!
1338 	 *
1339 	 * NOTE: insert_***HEAD*** should be safe for the traversals.
1340 	 */
1341 	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1342 
1343 	/* Set interface capabilities to the intersection set of all members */
1344 	bridge_mutecaps(sc);
1345 	bridge_linkcheck(sc);
1346 
1347 	/* Place the interface into promiscuous mode */
1348 	switch (ifs->if_type) {
1349 		case IFT_ETHER:
1350 		case IFT_L2VLAN:
1351 			error = ifpromisc(ifs, 1);
1352 			break;
1353 	}
1354 
1355 	if (error)
1356 		bridge_delete_member(sc, bif, 0);
1357 	return (error);
1358 }
1359 
1360 static int
1361 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1362 {
1363 	struct ifbreq *req = arg;
1364 	struct bridge_iflist *bif;
1365 
1366 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1367 	if (bif == NULL)
1368 		return (ENOENT);
1369 
1370 	bridge_delete_member(sc, bif, 0);
1371 
1372 	return (0);
1373 }
1374 
1375 static int
1376 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1377 {
1378 	struct ifbreq *req = arg;
1379 	struct bridge_iflist *bif;
1380 	struct bstp_port *bp;
1381 
1382 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1383 	if (bif == NULL)
1384 		return (ENOENT);
1385 
1386 	bp = &bif->bif_stp;
1387 	req->ifbr_ifsflags = bif->bif_flags;
1388 	req->ifbr_state = bp->bp_state;
1389 	req->ifbr_priority = bp->bp_priority;
1390 	req->ifbr_path_cost = bp->bp_path_cost;
1391 	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1392 	req->ifbr_proto = bp->bp_protover;
1393 	req->ifbr_role = bp->bp_role;
1394 	req->ifbr_stpflags = bp->bp_flags;
1395 	req->ifbr_addrcnt = bif->bif_addrcnt;
1396 	req->ifbr_addrmax = bif->bif_addrmax;
1397 	req->ifbr_addrexceeded = bif->bif_addrexceeded;
1398 
1399 	/* Copy STP state options as flags */
1400 	if (bp->bp_operedge)
1401 		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1402 	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1403 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1404 	if (bp->bp_ptp_link)
1405 		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1406 	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1407 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1408 	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1409 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1410 	if (bp->bp_flags & BSTP_PORT_ADMCOST)
1411 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1412 	return (0);
1413 }
1414 
1415 static int
1416 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1417 {
1418 	struct epoch_tracker et;
1419 	struct ifbreq *req = arg;
1420 	struct bridge_iflist *bif;
1421 	struct bstp_port *bp;
1422 	int error;
1423 
1424 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1425 	if (bif == NULL)
1426 		return (ENOENT);
1427 	bp = &bif->bif_stp;
1428 
1429 	if (req->ifbr_ifsflags & IFBIF_SPAN)
1430 		/* SPAN is readonly */
1431 		return (EINVAL);
1432 
1433 	NET_EPOCH_ENTER(et);
1434 
1435 	if (req->ifbr_ifsflags & IFBIF_STP) {
1436 		if ((bif->bif_flags & IFBIF_STP) == 0) {
1437 			error = bstp_enable(&bif->bif_stp);
1438 			if (error) {
1439 				NET_EPOCH_EXIT(et);
1440 				return (error);
1441 			}
1442 		}
1443 	} else {
1444 		if ((bif->bif_flags & IFBIF_STP) != 0)
1445 			bstp_disable(&bif->bif_stp);
1446 	}
1447 
1448 	/* Pass on STP flags */
1449 	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1450 	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1451 	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1452 	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1453 
1454 	/* Save the bits relating to the bridge */
1455 	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1456 
1457 	NET_EPOCH_EXIT(et);
1458 
1459 	return (0);
1460 }
1461 
1462 static int
1463 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1464 {
1465 	struct ifbrparam *param = arg;
1466 
1467 	sc->sc_brtmax = param->ifbrp_csize;
1468 	bridge_rttrim(sc);
1469 
1470 	return (0);
1471 }
1472 
1473 static int
1474 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1475 {
1476 	struct ifbrparam *param = arg;
1477 
1478 	param->ifbrp_csize = sc->sc_brtmax;
1479 
1480 	return (0);
1481 }
1482 
1483 static int
1484 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1485 {
1486 	struct ifbifconf *bifc = arg;
1487 	struct bridge_iflist *bif;
1488 	struct ifbreq breq;
1489 	char *buf, *outbuf;
1490 	int count, buflen, len, error = 0;
1491 
1492 	count = 0;
1493 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1494 		count++;
1495 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1496 		count++;
1497 
1498 	buflen = sizeof(breq) * count;
1499 	if (bifc->ifbic_len == 0) {
1500 		bifc->ifbic_len = buflen;
1501 		return (0);
1502 	}
1503 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1504 	if (outbuf == NULL)
1505 		return (ENOMEM);
1506 
1507 	count = 0;
1508 	buf = outbuf;
1509 	len = min(bifc->ifbic_len, buflen);
1510 	bzero(&breq, sizeof(breq));
1511 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1512 		if (len < sizeof(breq))
1513 			break;
1514 
1515 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1516 		    sizeof(breq.ifbr_ifsname));
1517 		/* Fill in the ifbreq structure */
1518 		error = bridge_ioctl_gifflags(sc, &breq);
1519 		if (error)
1520 			break;
1521 		memcpy(buf, &breq, sizeof(breq));
1522 		count++;
1523 		buf += sizeof(breq);
1524 		len -= sizeof(breq);
1525 	}
1526 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1527 		if (len < sizeof(breq))
1528 			break;
1529 
1530 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1531 		    sizeof(breq.ifbr_ifsname));
1532 		breq.ifbr_ifsflags = bif->bif_flags;
1533 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1534 		memcpy(buf, &breq, sizeof(breq));
1535 		count++;
1536 		buf += sizeof(breq);
1537 		len -= sizeof(breq);
1538 	}
1539 
1540 	bifc->ifbic_len = sizeof(breq) * count;
1541 	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1542 	free(outbuf, M_TEMP);
1543 	return (error);
1544 }
1545 
1546 static int
1547 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1548 {
1549 	struct ifbaconf *bac = arg;
1550 	struct bridge_rtnode *brt;
1551 	struct ifbareq bareq;
1552 	char *buf, *outbuf;
1553 	int count, buflen, len, error = 0;
1554 
1555 	if (bac->ifbac_len == 0)
1556 		return (0);
1557 
1558 	count = 0;
1559 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1560 		count++;
1561 	buflen = sizeof(bareq) * count;
1562 
1563 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1564 	if (outbuf == NULL)
1565 		return (ENOMEM);
1566 
1567 	count = 0;
1568 	buf = outbuf;
1569 	len = min(bac->ifbac_len, buflen);
1570 	bzero(&bareq, sizeof(bareq));
1571 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1572 		if (len < sizeof(bareq))
1573 			goto out;
1574 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1575 		    sizeof(bareq.ifba_ifsname));
1576 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1577 		bareq.ifba_vlan = brt->brt_vlan;
1578 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1579 				time_uptime < brt->brt_expire)
1580 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1581 		else
1582 			bareq.ifba_expire = 0;
1583 		bareq.ifba_flags = brt->brt_flags;
1584 
1585 		memcpy(buf, &bareq, sizeof(bareq));
1586 		count++;
1587 		buf += sizeof(bareq);
1588 		len -= sizeof(bareq);
1589 	}
1590 out:
1591 	bac->ifbac_len = sizeof(bareq) * count;
1592 	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1593 	free(outbuf, M_TEMP);
1594 	return (error);
1595 }
1596 
1597 static int
1598 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1599 {
1600 	struct ifbareq *req = arg;
1601 	struct bridge_iflist *bif;
1602 	struct epoch_tracker et;
1603 	int error;
1604 
1605 	NET_EPOCH_ENTER(et);
1606 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1607 	if (bif == NULL) {
1608 		NET_EPOCH_EXIT(et);
1609 		return (ENOENT);
1610 	}
1611 
1612 	/* bridge_rtupdate() may acquire the lock. */
1613 	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1614 	    req->ifba_flags);
1615 	NET_EPOCH_EXIT(et);
1616 
1617 	return (error);
1618 }
1619 
1620 static int
1621 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1622 {
1623 	struct ifbrparam *param = arg;
1624 
1625 	sc->sc_brttimeout = param->ifbrp_ctime;
1626 	return (0);
1627 }
1628 
1629 static int
1630 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1631 {
1632 	struct ifbrparam *param = arg;
1633 
1634 	param->ifbrp_ctime = sc->sc_brttimeout;
1635 	return (0);
1636 }
1637 
1638 static int
1639 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1640 {
1641 	struct ifbareq *req = arg;
1642 
1643 	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
1644 }
1645 
1646 static int
1647 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1648 {
1649 	struct ifbreq *req = arg;
1650 
1651 	BRIDGE_RT_LOCK(sc);
1652 	bridge_rtflush(sc, req->ifbr_ifsflags);
1653 	BRIDGE_RT_UNLOCK(sc);
1654 
1655 	return (0);
1656 }
1657 
1658 static int
1659 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1660 {
1661 	struct ifbrparam *param = arg;
1662 	struct bstp_state *bs = &sc->sc_stp;
1663 
1664 	param->ifbrp_prio = bs->bs_bridge_priority;
1665 	return (0);
1666 }
1667 
1668 static int
1669 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1670 {
1671 	struct ifbrparam *param = arg;
1672 
1673 	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1674 }
1675 
1676 static int
1677 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1678 {
1679 	struct ifbrparam *param = arg;
1680 	struct bstp_state *bs = &sc->sc_stp;
1681 
1682 	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1683 	return (0);
1684 }
1685 
1686 static int
1687 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1688 {
1689 	struct ifbrparam *param = arg;
1690 
1691 	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1692 }
1693 
1694 static int
1695 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1696 {
1697 	struct ifbrparam *param = arg;
1698 	struct bstp_state *bs = &sc->sc_stp;
1699 
1700 	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1701 	return (0);
1702 }
1703 
1704 static int
1705 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1706 {
1707 	struct ifbrparam *param = arg;
1708 
1709 	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1710 }
1711 
1712 static int
1713 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1714 {
1715 	struct ifbrparam *param = arg;
1716 	struct bstp_state *bs = &sc->sc_stp;
1717 
1718 	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1719 	return (0);
1720 }
1721 
1722 static int
1723 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1724 {
1725 	struct ifbrparam *param = arg;
1726 
1727 	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1728 }
1729 
1730 static int
1731 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1732 {
1733 	struct ifbreq *req = arg;
1734 	struct bridge_iflist *bif;
1735 
1736 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1737 	if (bif == NULL)
1738 		return (ENOENT);
1739 
1740 	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1741 }
1742 
1743 static int
1744 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1745 {
1746 	struct ifbreq *req = arg;
1747 	struct bridge_iflist *bif;
1748 
1749 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1750 	if (bif == NULL)
1751 		return (ENOENT);
1752 
1753 	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1754 }
1755 
1756 static int
1757 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1758 {
1759 	struct ifbreq *req = arg;
1760 	struct bridge_iflist *bif;
1761 
1762 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1763 	if (bif == NULL)
1764 		return (ENOENT);
1765 
1766 	bif->bif_addrmax = req->ifbr_addrmax;
1767 	return (0);
1768 }
1769 
1770 static int
1771 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1772 {
1773 	struct ifbreq *req = arg;
1774 	struct bridge_iflist *bif = NULL;
1775 	struct ifnet *ifs;
1776 
1777 	ifs = ifunit(req->ifbr_ifsname);
1778 	if (ifs == NULL)
1779 		return (ENOENT);
1780 
1781 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1782 		if (ifs == bif->bif_ifp)
1783 			return (EBUSY);
1784 
1785 	if (ifs->if_bridge != NULL)
1786 		return (EBUSY);
1787 
1788 	switch (ifs->if_type) {
1789 		case IFT_ETHER:
1790 		case IFT_GIF:
1791 		case IFT_L2VLAN:
1792 			break;
1793 		default:
1794 			return (EINVAL);
1795 	}
1796 
1797 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1798 	if (bif == NULL)
1799 		return (ENOMEM);
1800 
1801 	bif->bif_ifp = ifs;
1802 	bif->bif_flags = IFBIF_SPAN;
1803 
1804 	CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1805 
1806 	return (0);
1807 }
1808 
1809 static int
1810 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1811 {
1812 	struct ifbreq *req = arg;
1813 	struct bridge_iflist *bif;
1814 	struct ifnet *ifs;
1815 
1816 	ifs = ifunit(req->ifbr_ifsname);
1817 	if (ifs == NULL)
1818 		return (ENOENT);
1819 
1820 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1821 		if (ifs == bif->bif_ifp)
1822 			break;
1823 
1824 	if (bif == NULL)
1825 		return (ENOENT);
1826 
1827 	bridge_delete_span(sc, bif);
1828 
1829 	return (0);
1830 }
1831 
1832 static int
1833 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1834 {
1835 	struct ifbropreq *req = arg;
1836 	struct bstp_state *bs = &sc->sc_stp;
1837 	struct bstp_port *root_port;
1838 
1839 	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1840 	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1841 	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1842 
1843 	root_port = bs->bs_root_port;
1844 	if (root_port == NULL)
1845 		req->ifbop_root_port = 0;
1846 	else
1847 		req->ifbop_root_port = root_port->bp_ifp->if_index;
1848 
1849 	req->ifbop_holdcount = bs->bs_txholdcount;
1850 	req->ifbop_priority = bs->bs_bridge_priority;
1851 	req->ifbop_protocol = bs->bs_protover;
1852 	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1853 	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1854 	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1855 	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1856 	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1857 	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1858 
1859 	return (0);
1860 }
1861 
1862 static int
1863 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1864 {
1865 	struct ifbrparam *param = arg;
1866 
1867 	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1868 	return (0);
1869 }
1870 
1871 static int
1872 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1873 {
1874 	struct ifbpstpconf *bifstp = arg;
1875 	struct bridge_iflist *bif;
1876 	struct bstp_port *bp;
1877 	struct ifbpstpreq bpreq;
1878 	char *buf, *outbuf;
1879 	int count, buflen, len, error = 0;
1880 
1881 	count = 0;
1882 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1883 		if ((bif->bif_flags & IFBIF_STP) != 0)
1884 			count++;
1885 	}
1886 
1887 	buflen = sizeof(bpreq) * count;
1888 	if (bifstp->ifbpstp_len == 0) {
1889 		bifstp->ifbpstp_len = buflen;
1890 		return (0);
1891 	}
1892 
1893 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1894 	if (outbuf == NULL)
1895 		return (ENOMEM);
1896 
1897 	count = 0;
1898 	buf = outbuf;
1899 	len = min(bifstp->ifbpstp_len, buflen);
1900 	bzero(&bpreq, sizeof(bpreq));
1901 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1902 		if (len < sizeof(bpreq))
1903 			break;
1904 
1905 		if ((bif->bif_flags & IFBIF_STP) == 0)
1906 			continue;
1907 
1908 		bp = &bif->bif_stp;
1909 		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1910 		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1911 		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1912 		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1913 		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1914 		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1915 
1916 		memcpy(buf, &bpreq, sizeof(bpreq));
1917 		count++;
1918 		buf += sizeof(bpreq);
1919 		len -= sizeof(bpreq);
1920 	}
1921 
1922 	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1923 	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1924 	free(outbuf, M_TEMP);
1925 	return (error);
1926 }
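
/*
 * Like the other list-returning handlers, bridge_ioctl_gifsstp() uses a
 * two-pass convention: a caller that passes ifbpstp_len == 0 is only
 * told how much buffer space it would need, and is expected to retry
 * with a suitably sized buffer for the actual copyout().
 */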
1927 
1928 static int
1929 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
1930 {
1931 	struct ifbrparam *param = arg;
1932 
1933 	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
1934 }
1935 
1936 static int
1937 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
1938 {
1939 	struct ifbrparam *param = arg;
1940 
1941 	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
1942 }
1943 
1944 /*
1945  * bridge_ifdetach:
1946  *
1947  *	Detach an interface from a bridge.  Called when a member
1948  *	interface is detaching.
1949  */
1950 static void
1951 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
1952 {
1953 	struct bridge_softc *sc = ifp->if_bridge;
1954 	struct bridge_iflist *bif;
1955 
1956 	if (ifp->if_flags & IFF_RENAMING)
1957 		return;
1958 	if (V_bridge_cloner == NULL) {
1959 		/*
1960 		 * This detach handler can be called after
1961 		 * vnet_bridge_uninit().  Just return in that case.
1962 		 */
1963 		return;
1964 	}
1965 	/* Check if the interface is a bridge member */
1966 	if (sc != NULL) {
1967 		BRIDGE_LOCK(sc);
1968 
1969 		bif = bridge_lookup_member_if(sc, ifp);
1970 		if (bif != NULL)
1971 			bridge_delete_member(sc, bif, 1);
1972 
1973 		BRIDGE_UNLOCK(sc);
1974 		return;
1975 	}
1976 
1977 	/* Check if the interface is a span port */
1978 	BRIDGE_LIST_LOCK();
1979 	LIST_FOREACH(sc, &V_bridge_list, sc_list) {
1980 		BRIDGE_LOCK(sc);
1981 		CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1982 			if (ifp == bif->bif_ifp) {
1983 				bridge_delete_span(sc, bif);
1984 				break;
1985 			}
1986 
1987 		BRIDGE_UNLOCK(sc);
1988 	}
1989 	BRIDGE_LIST_UNLOCK();
1990 }
1991 
1992 /*
1993  * bridge_init:
1994  *
1995  *	Initialize a bridge interface.
1996  */
1997 static void
1998 bridge_init(void *xsc)
1999 {
2000 	struct bridge_softc *sc = (struct bridge_softc *)xsc;
2001 	struct ifnet *ifp = sc->sc_ifp;
2002 
2003 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2004 		return;
2005 
2006 	BRIDGE_LOCK(sc);
2007 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
2008 	    bridge_timer, sc);
2009 
2010 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2011 	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
2012 
2013 	BRIDGE_UNLOCK(sc);
2014 }
2015 
2016 /*
2017  * bridge_stop:
2018  *
2019  *	Stop the bridge interface.
2020  */
2021 static void
2022 bridge_stop(struct ifnet *ifp, int disable)
2023 {
2024 	struct bridge_softc *sc = ifp->if_softc;
2025 
2026 	BRIDGE_LOCK_ASSERT(sc);
2027 
2028 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2029 		return;
2030 
2031 	BRIDGE_RT_LOCK(sc);
2032 	callout_stop(&sc->sc_brcallout);
2033 
2034 	bstp_stop(&sc->sc_stp);
2035 
2036 	bridge_rtflush(sc, IFBF_FLUSHDYN);
2037 	BRIDGE_RT_UNLOCK(sc);
2038 
2039 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2040 }
2041 
2042 /*
2043  * bridge_enqueue:
2044  *
2045  *	Enqueue a packet on a bridge member interface.
2046  *
2047  */
2048 static int
2049 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
2050 {
2051 	int len, err = 0;
2052 	short mflags;
2053 	struct mbuf *m0;
2054 
2055 	/* We may be sending a fragment, so traverse the mbuf chain. */
2056 	for (; m; m = m0) {
2057 		m0 = m->m_nextpkt;
2058 		m->m_nextpkt = NULL;
2059 		len = m->m_pkthdr.len;
2060 		mflags = m->m_flags;
2061 
2062 		/*
2063 		 * If the underlying interface cannot do VLAN tag insertion
2064 		 * itself, then attach a packet tag that holds it.
2065 		 */
2066 		if ((m->m_flags & M_VLANTAG) &&
2067 		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
2068 			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2069 			if (m == NULL) {
2070 				if_printf(dst_ifp,
2071 				    "unable to prepend VLAN header\n");
2072 				if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
2073 				continue;
2074 			}
2075 			m->m_flags &= ~M_VLANTAG;
2076 		}
2077 
2078 		M_ASSERTPKTHDR(m); /* We shouldn't transmit mbuf without pkthdr */
2079 		if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
2080 			int n;
2081 
2082 			for (m = m0, n = 1; m != NULL; m = m0, n++) {
2083 				m0 = m->m_nextpkt;
2084 				m_freem(m);
2085 			}
2086 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, n);
2087 			break;
2088 		}
2089 
2090 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
2091 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
2092 		if (mflags & M_MCAST)
2093 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
2094 	}
2095 
2096 	return (err);
2097 }
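
/*
 * Callers hand bridge_enqueue() either a single packet or an m_nextpkt
 * chain (for example, the fragments produced by bridge_fragment()); on
 * a transmit failure the remainder of the chain is freed and counted as
 * output errors on the bridge interface.
 */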
2098 
2099 /*
2100  * bridge_dummynet:
2101  *
2102  * 	Receive a queued packet from dummynet and pass it on to the output
2103  * 	interface.
2104  *
2105  *	The mbuf has the Ethernet header already attached.
2106  */
2107 static void
2108 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2109 {
2110 	struct bridge_softc *sc;
2111 
2112 	sc = ifp->if_bridge;
2113 
2114 	/*
2115 	 * The packet didn't originate from a member interface. This should only
2116 	 * ever happen if a member interface is removed while packets are
2117 	 * queued for it.
2118 	 */
2119 	if (sc == NULL) {
2120 		m_freem(m);
2121 		return;
2122 	}
2123 
2124 	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2125 #ifdef INET6
2126 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2127 #endif
2128 	    ) {
2129 		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2130 			return;
2131 		if (m == NULL)
2132 			return;
2133 	}
2134 
2135 	bridge_enqueue(sc, ifp, m);
2136 }
2137 
2138 /*
2139  * bridge_output:
2140  *
2141  *	Send output from a bridge member interface.  This
2142  *	performs the bridging function for locally originated
2143  *	packets.
2144  *
2145  *	The mbuf has the Ethernet header already attached.  We must
2146  *	enqueue or free the mbuf before returning.
2147  */
2148 static int
2149 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2150     struct rtentry *rt)
2151 {
2152 	struct ether_header *eh;
2153 	struct ifnet *bifp, *dst_if;
2154 	struct bridge_softc *sc;
2155 	uint16_t vlan;
2156 
2157 	NET_EPOCH_ASSERT();
2158 
2159 	if (m->m_len < ETHER_HDR_LEN) {
2160 		m = m_pullup(m, ETHER_HDR_LEN);
2161 		if (m == NULL)
2162 			return (0);
2163 	}
2164 
2165 	eh = mtod(m, struct ether_header *);
2166 	sc = ifp->if_bridge;
2167 	vlan = VLANTAGOF(m);
2168 
2169 	bifp = sc->sc_ifp;
2170 
2171 	/*
2172 	 * If bridge is down, but the original output interface is up,
2173 	 * go ahead and send out that interface.  Otherwise, the packet
2174 	 * is dropped below.
2175 	 */
2176 	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2177 		dst_if = ifp;
2178 		goto sendunicast;
2179 	}
2180 
2181 	/*
2182 	 * If the packet is a multicast, or we don't know a better way to
2183 	 * get there, send to all interfaces.
2184 	 */
2185 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
2186 		dst_if = NULL;
2187 	else
2188 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2189 	/* Tap any traffic not passing back out the originating interface */
2190 	if (dst_if != ifp)
2191 		ETHER_BPF_MTAP(bifp, m);
2192 	if (dst_if == NULL) {
2193 		struct bridge_iflist *bif;
2194 		struct mbuf *mc;
2195 		int used = 0;
2196 
2197 		bridge_span(sc, m);
2198 
2199 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2200 			dst_if = bif->bif_ifp;
2201 
2202 			if (dst_if->if_type == IFT_GIF)
2203 				continue;
2204 			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2205 				continue;
2206 
2207 			/*
2208 			 * If this is not the original output interface,
2209 			 * and the interface is participating in spanning
2210 			 * tree, make sure the port is in a state that
2211 			 * allows forwarding.
2212 			 */
2213 			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2214 			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2215 				continue;
2216 
2217 			if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2218 				used = 1;
2219 				mc = m;
2220 			} else {
2221 				mc = m_dup(m, M_NOWAIT);
2222 				if (mc == NULL) {
2223 					if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2224 					continue;
2225 				}
2226 			}
2227 
2228 			bridge_enqueue(sc, dst_if, mc);
2229 		}
2230 		if (used == 0)
2231 			m_freem(m);
2232 		return (0);
2233 	}
2234 
2235 sendunicast:
2236 	/*
2237 	 * XXX Spanning tree consideration here?
2238 	 */
2239 
2240 	bridge_span(sc, m);
2241 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2242 		m_freem(m);
2243 		return (0);
2244 	}
2245 
2246 	bridge_enqueue(sc, dst_if, m);
2247 	return (0);
2248 }
2249 
2250 /*
2251  * bridge_transmit:
2252  *
2253  *	Do output on a bridge.
2254  *
2255  */
2256 static int
2257 bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2258 {
2259 	struct bridge_softc *sc;
2260 	struct ether_header *eh;
2261 	struct ifnet *dst_if;
2262 	int error = 0;
2263 
2264 	sc = ifp->if_softc;
2265 
2266 	ETHER_BPF_MTAP(ifp, m);
2267 
2268 	eh = mtod(m, struct ether_header *);
2269 
2270 	if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2271 	    (dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1)) != NULL) {
2272 		error = bridge_enqueue(sc, dst_if, m);
2273 	} else
2274 		bridge_broadcast(sc, ifp, m, 0);
2275 
2276 	return (error);
2277 }
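
/*
 * Locally originated traffic is looked up under vlan 1 here, the same
 * default vlan to which bridge_rtupdate() maps untagged (vlan 0)
 * learning entries.
 */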
2278 
2279 #ifdef ALTQ
2280 static void
2281 bridge_altq_start(if_t ifp)
2282 {
2283 	struct ifaltq *ifq = &ifp->if_snd;
2284 	struct mbuf *m;
2285 
2286 	IFQ_LOCK(ifq);
2287 	IFQ_DEQUEUE_NOLOCK(ifq, m);
2288 	while (m != NULL) {
2289 		bridge_transmit(ifp, m);
2290 		IFQ_DEQUEUE_NOLOCK(ifq, m);
2291 	}
2292 	IFQ_UNLOCK(ifq);
2293 }
2294 
2295 static int
2296 bridge_altq_transmit(if_t ifp, struct mbuf *m)
2297 {
2298 	int err;
2299 
2300 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
2301 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
2302 		if (err == 0)
2303 			bridge_altq_start(ifp);
2304 	} else
2305 		err = bridge_transmit(ifp, m);
2306 
2307 	return (err);
2308 }
2309 #endif	/* ALTQ */
2310 
2311 /*
2312  * The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2313  */
2314 static void
2315 bridge_qflush(struct ifnet *ifp __unused)
2316 {
2317 }
2318 
2319 /*
2320  * bridge_forward:
2321  *
2322  *	The forwarding function of the bridge.
2323  *
2324  *	NOTE: Runs within the network epoch (see NET_EPOCH_ASSERT() below).
2325  */
2326 static void
2327 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2328     struct mbuf *m)
2329 {
2330 	struct bridge_iflist *dbif;
2331 	struct ifnet *src_if, *dst_if, *ifp;
2332 	struct ether_header *eh;
2333 	uint16_t vlan;
2334 	uint8_t *dst;
2335 	int error;
2336 
2337 	NET_EPOCH_ASSERT();
2338 
2339 	src_if = m->m_pkthdr.rcvif;
2340 	ifp = sc->sc_ifp;
2341 
2342 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2343 	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2344 	vlan = VLANTAGOF(m);
2345 
2346 	if ((sbif->bif_flags & IFBIF_STP) &&
2347 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2348 		goto drop;
2349 
2350 	eh = mtod(m, struct ether_header *);
2351 	dst = eh->ether_dhost;
2352 
2353 	/* If the interface is learning, record the address. */
2354 	if (sbif->bif_flags & IFBIF_LEARNING) {
2355 		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2356 		    sbif, 0, IFBAF_DYNAMIC);
2357 		/*
2358 		 * If the interface has an address limit, then deny any source
2359 		 * that is not in the cache.
2360 		 */
2361 		if (error && sbif->bif_addrmax)
2362 			goto drop;
2363 	}
2364 
2365 	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2366 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2367 		goto drop;
2368 
2369 #ifdef DEV_NETMAP
2370 	/*
2371 	 * Hand the packet to netmap only if it wasn't injected by netmap
2372 	 * itself.
2373 	 */
2374 	if ((m->m_flags & M_BRIDGE_INJECT) == 0 &&
2375 	    (if_getcapenable(ifp) & IFCAP_NETMAP) != 0) {
2376 		ifp->if_input(ifp, m);
2377 		return;
2378 	}
2379 	m->m_flags &= ~M_BRIDGE_INJECT;
2380 #endif
2381 
2382 	/*
2383 	 * At this point, the port either doesn't participate
2384 	 * in spanning tree or it is in the forwarding state.
2385 	 */
2386 
2387 	/*
2388 	 * If the packet is unicast, destined for someone on
2389 	 * "this" side of the bridge, drop it.
2390 	 */
2391 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2392 		dst_if = bridge_rtlookup(sc, dst, vlan);
2393 		if (src_if == dst_if)
2394 			goto drop;
2395 	} else {
2396 		/*
2397 		 * Check if it's a reserved multicast address; any address
2398 		 * listed in 802.1D section 7.12.6 may not be forwarded by the
2399 		 * bridge.
2400 		 * The range is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F.
2401 		 */
2402 		if (dst[0] == 0x01 && dst[1] == 0x80 &&
2403 		    dst[2] == 0xc2 && dst[3] == 0x00 &&
2404 		    dst[4] == 0x00 && dst[5] <= 0x0f)
2405 			goto drop;
2406 
2407 		/* ...forward it to all interfaces. */
2408 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2409 		dst_if = NULL;
2410 	}
2411 
2412 	/*
2413 	 * If we have a destination interface which is a member of our bridge,
2414 	 * OR this is a unicast packet, push it through the bpf(4) machinery.
2415 	 * For broadcast or multicast packets, don't bother because it will
2416 	 * be reinjected into ether_input. We do this before we pass the packets
2417 	 * through the pfil(9) framework, as it is possible that pfil(9) will
2418 	 * drop the packet, or possibly modify it, making it difficult to debug
2419 	 * firewall issues on the bridge.
2420 	 */
2421 	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2422 		ETHER_BPF_MTAP(ifp, m);
2423 
2424 	/* run the packet filter */
2425 	if (PFIL_HOOKED_IN(V_inet_pfil_head)
2426 #ifdef INET6
2427 	    || PFIL_HOOKED_IN(V_inet6_pfil_head)
2428 #endif
2429 	    ) {
2430 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2431 			return;
2432 		if (m == NULL)
2433 			return;
2434 	}
2435 
2436 	if (dst_if == NULL) {
2437 		bridge_broadcast(sc, src_if, m, 1);
2438 		return;
2439 	}
2440 
2441 	/*
2442 	 * At this point, we're dealing with a unicast frame
2443 	 * going to a different interface.
2444 	 */
2445 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2446 		goto drop;
2447 
2448 	dbif = bridge_lookup_member_if(sc, dst_if);
2449 	if (dbif == NULL)
2450 		/* Not a member of the bridge (anymore?) */
2451 		goto drop;
2452 
2453 	/* Private segments cannot talk to each other. */
2454 	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2455 		goto drop;
2456 
2457 	if ((dbif->bif_flags & IFBIF_STP) &&
2458 	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2459 		goto drop;
2460 
2461 	if (PFIL_HOOKED_OUT(V_inet_pfil_head)
2462 #ifdef INET6
2463 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2464 #endif
2465 	    ) {
2466 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2467 			return;
2468 		if (m == NULL)
2469 			return;
2470 	}
2471 
2472 	bridge_enqueue(sc, dst_if, m);
2473 	return;
2474 
2475 drop:
2476 	m_freem(m);
2477 }
2478 
2479 /*
2480  * bridge_input:
2481  *
2482  *	Receive input from a member interface.  Queue the packet for
2483  *	bridging if it is not for us.
2484  */
2485 static struct mbuf *
2486 bridge_input(struct ifnet *ifp, struct mbuf *m)
2487 {
2488 	struct bridge_softc *sc;
2489 	struct bridge_iflist *bif, *bif2;
2490 	struct ifnet *bifp;
2491 	struct ether_header *eh;
2492 	struct mbuf *mc, *mc2;
2493 	uint16_t vlan;
2494 	int error;
2495 
2496 	NET_EPOCH_ASSERT();
2497 
2498 	eh = mtod(m, struct ether_header *);
2499 	vlan = VLANTAGOF(m);
2500 
2501 	sc = ifp->if_bridge;
2502 	if (sc == NULL) {
2503 		/*
2504 		 * This packet originated from the bridge itself, so it must
2505 		 * have been transmitted by netmap.  Derive the "source"
2506 		 * interface from the source address and drop the packet if the
2507 		 * source address isn't known.
2508 		 */
2509 		KASSERT((m->m_flags & M_BRIDGE_INJECT) != 0,
2510 		    ("%s: ifnet %p missing a bridge softc", __func__, ifp));
2511 		sc = if_getsoftc(ifp);
2512 		ifp = bridge_rtlookup(sc, eh->ether_shost, vlan);
2513 		if (ifp == NULL) {
2514 			if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2515 			m_freem(m);
2516 			return (NULL);
2517 		}
2518 		m->m_pkthdr.rcvif = ifp;
2519 	}
2520 	bifp = sc->sc_ifp;
2521 	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2522 		return (m);
2523 
2524 	/*
2525 	 * Implement support for bridge monitoring. If this flag has been
2526 	 * set on this interface, discard the packet once we push it through
2527 	 * the bpf(4) machinery, but before we do, increment the byte and
2528 	 * packet counters associated with this interface.
2529 	 */
2530 	if ((bifp->if_flags & IFF_MONITOR) != 0) {
2531 		m->m_pkthdr.rcvif  = bifp;
2532 		ETHER_BPF_MTAP(bifp, m);
2533 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2534 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2535 		m_freem(m);
2536 		return (NULL);
2537 	}
2538 	bif = bridge_lookup_member_if(sc, ifp);
2539 	if (bif == NULL) {
2540 		return (m);
2541 	}
2542 
2543 	bridge_span(sc, m);
2544 
2545 	if (m->m_flags & (M_BCAST|M_MCAST)) {
2546 		/* Tap off 802.1D packets; they do not get forwarded. */
2547 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2548 		    ETHER_ADDR_LEN) == 0) {
2549 			bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2550 			return (NULL);
2551 		}
2552 
2553 		if ((bif->bif_flags & IFBIF_STP) &&
2554 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2555 			return (m);
2556 		}
2557 
2558 		/*
2559 		 * Make a deep copy of the packet and enqueue the copy
2560 		 * for bridge processing; return the original packet for
2561 		 * local processing.
2562 		 */
2563 		mc = m_dup(m, M_NOWAIT);
2564 		if (mc == NULL) {
2565 			return (m);
2566 		}
2567 
2568 		/* Perform the bridge forwarding function with the copy. */
2569 		bridge_forward(sc, bif, mc);
2570 
2571 #ifdef DEV_NETMAP
2572 		/*
2573 		 * If netmap is enabled and has not already seen this packet,
2574 		 * then it will be consumed by bridge_forward().
2575 		 */
2576 		if ((if_getcapenable(bifp) & IFCAP_NETMAP) != 0 &&
2577 		    (m->m_flags & M_BRIDGE_INJECT) == 0) {
2578 			m_freem(m);
2579 			return (NULL);
2580 		}
2581 #endif
2582 
2583 		/*
2584 		 * Reinject the mbuf as arriving on the bridge so we have a
2585 		 * chance at claiming multicast packets. We cannot loop back
2586 		 * here from ether_input as a bridge is never a member of a
2587 		 * bridge.
2588 		 */
2589 		KASSERT(bifp->if_bridge == NULL,
2590 		    ("loop created in bridge_input"));
2591 		mc2 = m_dup(m, M_NOWAIT);
2592 		if (mc2 != NULL) {
2593 			/* Keep the layer3 header aligned */
2594 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2595 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2596 		}
2597 		if (mc2 != NULL) {
2598 			mc2->m_pkthdr.rcvif = bifp;
2599 			mc2->m_flags &= ~M_BRIDGE_INJECT;
2600 			sc->sc_if_input(bifp, mc2);
2601 		}
2602 
2603 		/* Return the original packet for local processing. */
2604 		return (m);
2605 	}
2606 
2607 	if ((bif->bif_flags & IFBIF_STP) &&
2608 	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2609 		return (m);
2610 	}
2611 
2612 #if defined(INET) || defined(INET6)
2613 #define	CARP_CHECK_WE_ARE_DST(iface) \
2614 	((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_dhost))
2615 #define	CARP_CHECK_WE_ARE_SRC(iface) \
2616 	((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_shost))
2617 #else
2618 #define	CARP_CHECK_WE_ARE_DST(iface)	false
2619 #define	CARP_CHECK_WE_ARE_SRC(iface)	false
2620 #endif
2621 
2622 #ifdef INET6
2623 #define	PFIL_HOOKED_INET6	PFIL_HOOKED_IN(V_inet6_pfil_head)
2624 #else
2625 #define	PFIL_HOOKED_INET6	false
2626 #endif
2627 
2628 #ifdef DEV_NETMAP
2629 #define	GRAB_FOR_NETMAP(ifp, m) do {					\
2630 	if ((if_getcapenable(ifp) & IFCAP_NETMAP) != 0 &&		\
2631 	    ((m)->m_flags & M_BRIDGE_INJECT) == 0) {			\
2632 		(ifp)->if_input(ifp, m);				\
2633 		return (NULL);						\
2634 	}								\
2635 } while (0)
2636 #else
2637 #define	GRAB_FOR_NETMAP(ifp, m)
2638 #endif
2639 
2640 #define GRAB_OUR_PACKETS(iface)						\
2641 	if ((iface)->if_type == IFT_GIF)				\
2642 		continue;						\
2643 	/* It is destined for us. */					\
2644 	if (memcmp(IF_LLADDR(iface), eh->ether_dhost, ETHER_ADDR_LEN) == 0 || \
2645 	    CARP_CHECK_WE_ARE_DST(iface)) {				\
2646 		if (bif->bif_flags & IFBIF_LEARNING) {			\
2647 			error = bridge_rtupdate(sc, eh->ether_shost,	\
2648 			    vlan, bif, 0, IFBAF_DYNAMIC);		\
2649 			if (error && bif->bif_addrmax) {		\
2650 				m_freem(m);				\
2651 				return (NULL);				\
2652 			}						\
2653 		}							\
2654 		m->m_pkthdr.rcvif = iface;				\
2655 		if ((iface) == ifp) {					\
2656 			/* Skip bridge processing... src == dest */	\
2657 			return (m);					\
2658 		}							\
2659 		/* It's passing over or to the bridge, locally. */	\
2660 		ETHER_BPF_MTAP(bifp, m);				\
2661 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);		\
2662 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);\
2663 		/* Hand the packet over to netmap if necessary. */	\
2664 		GRAB_FOR_NETMAP(bifp, m);				\
2665 		/* Filter on the physical interface. */			\
2666 		if (V_pfil_local_phys && (PFIL_HOOKED_IN(V_inet_pfil_head) || \
2667 		    PFIL_HOOKED_INET6)) {				\
2668 			if (bridge_pfil(&m, NULL, ifp,			\
2669 			    PFIL_IN) != 0 || m == NULL) {		\
2670 				return (NULL);				\
2671 			}						\
2672 		}							\
2673 		if ((iface) != bifp)					\
2674 			ETHER_BPF_MTAP(iface, m);			\
2675 		return (m);						\
2676 	}								\
2677 									\
2678 	/* We just received a packet that we sent out. */		\
2679 	if (memcmp(IF_LLADDR(iface), eh->ether_shost, ETHER_ADDR_LEN) == 0 || \
2680 	    CARP_CHECK_WE_ARE_SRC(iface)) {				\
2681 		m_freem(m);						\
2682 		return (NULL);						\
2683 	}
2684 
2685 	/*
2686 	 * Unicast.  Make sure it's not for the bridge.
2687 	 */
2688 	do { GRAB_OUR_PACKETS(bifp) } while (0);
2689 
2690 	/*
2691 	 * Give ifp a chance at first priority.  This helps when the packet
2692 	 * arrives on an interface, such as a VLAN, that shares the same MAC
2693 	 * address with several other interfaces on the same bridge.  It also
2694 	 * saves some CPU cycles when the destination interface and the input
2695 	 * interface (i.e., ifp) are the same.
2696 	 */
2697 	do { GRAB_OUR_PACKETS(ifp) } while (0);
2698 
2699 	/* Now check all the bridge members. */
2700 	CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2701 		GRAB_OUR_PACKETS(bif2->bif_ifp)
2702 	}
2703 
2704 #undef CARP_CHECK_WE_ARE_DST
2705 #undef CARP_CHECK_WE_ARE_SRC
2706 #undef PFIL_HOOKED_INET6
2707 #undef GRAB_FOR_NETMAP
2708 #undef GRAB_OUR_PACKETS
2709 
2710 	/* Perform the bridge forwarding function. */
2711 	bridge_forward(sc, bif, m);
2712 
2713 	return (NULL);
2714 }
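
/*
 * In summary, bridge_input() handles a received frame as follows: a
 * monitoring bridge taps the frame and drops it; 802.1D BPDUs go to
 * bstp_input(); other broadcast/multicast frames are duplicated (one
 * copy is forwarded, one is reinjected as arriving on the bridge, and
 * the original is returned for local processing); unicast frames
 * addressed to the bridge or to a member are delivered locally, while
 * frames carrying one of our own source addresses are dropped; anything
 * else is handed to bridge_forward().
 */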
2715 
2716 /*
2717  * Inject a packet back into the host ethernet stack.  This will generally only
2718  * be used by netmap when an application writes to the host TX ring.  The
2719  * M_BRIDGE_INJECT flag ensures that the packet is re-routed to the bridge
2720  * interface after ethernet processing.
2721  */
2722 static void
2723 bridge_inject(struct ifnet *ifp, struct mbuf *m)
2724 {
2725 	struct bridge_softc *sc;
2726 
2727 	KASSERT((if_getcapenable(ifp) & IFCAP_NETMAP) != 0,
2728 	    ("%s: iface %s is not running in netmap mode",
2729 	    __func__, if_name(ifp)));
2730 	KASSERT((m->m_flags & M_BRIDGE_INJECT) == 0,
2731 	    ("%s: mbuf %p has M_BRIDGE_INJECT set", __func__, m));
2732 
2733 	m->m_flags |= M_BRIDGE_INJECT;
2734 	sc = if_getsoftc(ifp);
2735 	sc->sc_if_input(ifp, m);
2736 }
2737 
2738 /*
2739  * bridge_broadcast:
2740  *
2741  *	Send a frame to all interfaces that are members of
2742  *	the bridge, except for the one on which the packet
2743  *	arrived.
2744  *
2745  *	NOTE: Runs within the network epoch (see NET_EPOCH_ASSERT() below).
2746  */
2747 static void
2748 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2749     struct mbuf *m, int runfilt)
2750 {
2751 	struct bridge_iflist *dbif, *sbif;
2752 	struct mbuf *mc;
2753 	struct ifnet *dst_if;
2754 	int used = 0, i;
2755 
2756 	NET_EPOCH_ASSERT();
2757 
2758 	sbif = bridge_lookup_member_if(sc, src_if);
2759 
2760 	/* Filter on the bridge interface before broadcasting */
2761 	if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2762 #ifdef INET6
2763 	    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2764 #endif
2765 	    )) {
2766 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2767 			return;
2768 		if (m == NULL)
2769 			return;
2770 	}
2771 
2772 	CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2773 		dst_if = dbif->bif_ifp;
2774 		if (dst_if == src_if)
2775 			continue;
2776 
2777 		/* Private segments cannot talk to each other. */
2778 		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2779 			continue;
2780 
2781 		if ((dbif->bif_flags & IFBIF_STP) &&
2782 		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2783 			continue;
2784 
2785 		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2786 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2787 			continue;
2788 
2789 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2790 			continue;
2791 
2792 		if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
2793 			mc = m;
2794 			used = 1;
2795 		} else {
2796 			mc = m_dup(m, M_NOWAIT);
2797 			if (mc == NULL) {
2798 				if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2799 				continue;
2800 			}
2801 		}
2802 
2803 		/*
2804 		 * Filter on the output interface. Pass a NULL bridge interface
2805 		 * pointer so we do not redundantly filter on the bridge for
2806 		 * each interface we broadcast on.
2807 		 */
2808 		if (runfilt && (PFIL_HOOKED_OUT(V_inet_pfil_head)
2809 #ifdef INET6
2810 		    || PFIL_HOOKED_OUT(V_inet6_pfil_head)
2811 #endif
2812 		    )) {
2813 			if (used == 0) {
2814 				/* Keep the layer3 header aligned */
2815 				i = min(mc->m_pkthdr.len, max_protohdr);
2816 				mc = m_copyup(mc, i, ETHER_ALIGN);
2817 				if (mc == NULL) {
2818 					if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2819 					continue;
2820 				}
2821 			}
2822 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2823 				continue;
2824 			if (mc == NULL)
2825 				continue;
2826 		}
2827 
2828 		bridge_enqueue(sc, dst_if, mc);
2829 	}
2830 	if (used == 0)
2831 		m_freem(m);
2832 }
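
/*
 * Note the small optimization above: the last member in the list, if it
 * qualifies, is handed the original mbuf (used = 1) instead of a copy,
 * so broadcasting to N ports costs at most N - 1 duplications.
 * bridge_output() uses the same trick in its flood path.
 */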
2833 
2834 /*
2835  * bridge_span:
2836  *
2837  *	Duplicate a packet out one or more interfaces that are in span mode;
2838  *	the original mbuf is left unmodified.
2839  */
2840 static void
2841 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2842 {
2843 	struct bridge_iflist *bif;
2844 	struct ifnet *dst_if;
2845 	struct mbuf *mc;
2846 
2847 	NET_EPOCH_ASSERT();
2848 
2849 	if (CK_LIST_EMPTY(&sc->sc_spanlist))
2850 		return;
2851 
2852 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2853 		dst_if = bif->bif_ifp;
2854 
2855 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2856 			continue;
2857 
2858 		mc = m_dup(m, M_NOWAIT);
2859 		if (mc == NULL) {
2860 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2861 			continue;
2862 		}
2863 
2864 		bridge_enqueue(sc, dst_if, mc);
2865 	}
2866 }
2867 
2868 /*
2869  * bridge_rtupdate:
2870  *
2871  *	Add a bridge routing entry.
2872  */
2873 static int
2874 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
2875     struct bridge_iflist *bif, int setflags, uint8_t flags)
2876 {
2877 	struct bridge_rtnode *brt;
2878 	struct bridge_iflist *obif;
2879 	int error;
2880 
2881 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
2882 
2883 	/* Check the source address is valid and not multicast. */
2884 	if (ETHER_IS_MULTICAST(dst) ||
2885 	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
2886 	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
2887 		return (EINVAL);
2888 
2889 	/* 802.1p frames map to vlan 1 */
2890 	if (vlan == 0)
2891 		vlan = 1;
2892 
2893 	/*
2894 	 * A route for this destination might already exist.  If so,
2895 	 * update it, otherwise create a new one.
2896 	 */
2897 	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2898 		BRIDGE_RT_LOCK(sc);
2899 
2900 		/* Check again, now that we have the lock. There could have
2901 		 * been a race and we only want to insert this once. */
2902 		if (bridge_rtnode_lookup(sc, dst, vlan) != NULL) {
2903 			BRIDGE_RT_UNLOCK(sc);
2904 			return (0);
2905 		}
2906 
2907 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2908 			sc->sc_brtexceeded++;
2909 			BRIDGE_RT_UNLOCK(sc);
2910 			return (ENOSPC);
2911 		}
2912 		/* Check per interface address limits (if enabled) */
2913 		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2914 			bif->bif_addrexceeded++;
2915 			BRIDGE_RT_UNLOCK(sc);
2916 			return (ENOSPC);
2917 		}
2918 
2919 		/*
2920 		 * Allocate a new bridge forwarding node, and
2921 		 * initialize the expiration time and Ethernet
2922 		 * address.
2923 		 */
2924 		brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2925 		if (brt == NULL) {
2926 			BRIDGE_RT_UNLOCK(sc);
2927 			return (ENOMEM);
2928 		}
2929 		brt->brt_vnet = curvnet;
2930 
2931 		if (bif->bif_flags & IFBIF_STICKY)
2932 			brt->brt_flags = IFBAF_STICKY;
2933 		else
2934 			brt->brt_flags = IFBAF_DYNAMIC;
2935 
2936 		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2937 		brt->brt_vlan = vlan;
2938 
2939 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2940 			uma_zfree(V_bridge_rtnode_zone, brt);
2941 			BRIDGE_RT_UNLOCK(sc);
2942 			return (error);
2943 		}
2944 		brt->brt_dst = bif;
2945 		bif->bif_addrcnt++;
2946 
2947 		BRIDGE_RT_UNLOCK(sc);
2948 	}
2949 
2950 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2951 	    (obif = brt->brt_dst) != bif) {
2952 		BRIDGE_RT_LOCK(sc);
2953 		brt->brt_dst->bif_addrcnt--;
2954 		brt->brt_dst = bif;
2955 		brt->brt_dst->bif_addrcnt++;
2956 		BRIDGE_RT_UNLOCK(sc);
2957 
2958 		if (V_log_mac_flap &&
2959 		    ppsratecheck(&V_log_last, &V_log_count, V_log_interval)) {
2960 			uint8_t *addr = &brt->brt_addr[0];
2961 			log(LOG_NOTICE,
2962 			    "%s: mac address %02x:%02x:%02x:%02x:%02x:%02x vlan %d moved from %s to %s\n",
2963 			    sc->sc_ifp->if_xname,
2964 			    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
2965 			    brt->brt_vlan,
2966 			    obif->bif_ifp->if_xname,
2967 			    bif->bif_ifp->if_xname);
2968 		}
2969 	}
2970 
2971 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2972 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
2973 	if (setflags)
2974 		brt->brt_flags = flags;
2975 
2976 	return (0);
2977 }
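
/*
 * When a dynamic entry is seen arriving on a different member than the
 * one it was learned on (a "MAC flap"), the code above simply re-points
 * the entry at the new port and, when MAC-flap logging is enabled, logs
 * the move subject to the ppsratecheck() rate limit.
 */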
2978 
2979 /*
2980  * bridge_rtlookup:
2981  *
2982  *	Lookup the destination interface for an address.
2983  */
2984 static struct ifnet *
2985 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2986 {
2987 	struct bridge_rtnode *brt;
2988 
2989 	NET_EPOCH_ASSERT();
2990 
2991 	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
2992 		return (NULL);
2993 
2994 	return (brt->brt_ifp);
2995 }
2996 
2997 /*
2998  * bridge_rttrim:
2999  *
3000  *	Trim the routine table so that we have a number
3001  *	Trim the routing table so that we have a number
3002  *	maximum number.
3003  */
3004 static void
3005 bridge_rttrim(struct bridge_softc *sc)
3006 {
3007 	struct bridge_rtnode *brt, *nbrt;
3008 
3009 	NET_EPOCH_ASSERT();
3010 	BRIDGE_RT_LOCK_ASSERT(sc);
3011 
3012 	/* Make sure we actually need to do this. */
3013 	if (sc->sc_brtcnt <= sc->sc_brtmax)
3014 		return;
3015 
3016 	/* Force an aging cycle; this might trim enough addresses. */
3017 	bridge_rtage(sc);
3018 	if (sc->sc_brtcnt <= sc->sc_brtmax)
3019 		return;
3020 
3021 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3022 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3023 			bridge_rtnode_destroy(sc, brt);
3024 			if (sc->sc_brtcnt <= sc->sc_brtmax)
3025 				return;
3026 		}
3027 	}
3028 }
3029 
3030 /*
3031  * bridge_timer:
3032  *
3033  *	Aging timer for the bridge.
3034  */
3035 static void
3036 bridge_timer(void *arg)
3037 {
3038 	struct bridge_softc *sc = arg;
3039 
3040 	BRIDGE_RT_LOCK_ASSERT(sc);
3041 
3042 	/* Destruction of rtnodes requires a proper vnet context */
3043 	CURVNET_SET(sc->sc_ifp->if_vnet);
3044 	bridge_rtage(sc);
3045 
3046 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
3047 		callout_reset(&sc->sc_brcallout,
3048 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
3049 	CURVNET_RESTORE();
3050 }
3051 
3052 /*
3053  * bridge_rtage:
3054  *
3055  *	Perform an aging cycle.
3056  */
3057 static void
3058 bridge_rtage(struct bridge_softc *sc)
3059 {
3060 	struct bridge_rtnode *brt, *nbrt;
3061 
3062 	BRIDGE_RT_LOCK_ASSERT(sc);
3063 
3064 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3065 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3066 			if (time_uptime >= brt->brt_expire)
3067 				bridge_rtnode_destroy(sc, brt);
3068 		}
3069 	}
3070 }
3071 
3072 /*
3073  * bridge_rtflush:
3074  *
3075  *	Remove all dynamic addresses from the bridge.
3076  */
3077 static void
3078 bridge_rtflush(struct bridge_softc *sc, int full)
3079 {
3080 	struct bridge_rtnode *brt, *nbrt;
3081 
3082 	BRIDGE_RT_LOCK_ASSERT(sc);
3083 
3084 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3085 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3086 			bridge_rtnode_destroy(sc, brt);
3087 	}
3088 }
3089 
3090 /*
3091  * bridge_rtdaddr:
3092  *
3093  *	Remove an address from the table.
3094  */
3095 static int
3096 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
3097 {
3098 	struct bridge_rtnode *brt;
3099 	int found = 0;
3100 
3101 	BRIDGE_RT_LOCK(sc);
3102 
3103 	/*
3104 	 * If vlan is zero then we want to delete for all vlans so the lookup
3105 	 * may return more than one.
3106 	 */
3107 	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
3108 		bridge_rtnode_destroy(sc, brt);
3109 		found = 1;
3110 	}
3111 
3112 	BRIDGE_RT_UNLOCK(sc);
3113 
3114 	return (found ? 0 : ENOENT);
3115 }
3116 
3117 /*
3118  * bridge_rtdelete:
3119  *
3120  *	Delete routes to a specific member interface.
3121  */
3122 static void
3123 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
3124 {
3125 	struct bridge_rtnode *brt, *nbrt;
3126 
3127 	BRIDGE_RT_LOCK_ASSERT(sc);
3128 
3129 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3130 		if (brt->brt_ifp == ifp && (full ||
3131 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
3132 			bridge_rtnode_destroy(sc, brt);
3133 	}
3134 }
3135 
3136 /*
3137  * bridge_rtable_init:
3138  *
3139  *	Initialize the route table for this bridge.
3140  */
3141 static void
3142 bridge_rtable_init(struct bridge_softc *sc)
3143 {
3144 	int i;
3145 
3146 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
3147 	    M_DEVBUF, M_WAITOK);
3148 
3149 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
3150 		CK_LIST_INIT(&sc->sc_rthash[i]);
3151 
3152 	sc->sc_rthash_key = arc4random();
3153 	CK_LIST_INIT(&sc->sc_rtlist);
3154 }
3155 
3156 /*
3157  * bridge_rtable_fini:
3158  *
3159  *	Deconstruct the route table for this bridge.
3160  */
3161 static void
3162 bridge_rtable_fini(struct bridge_softc *sc)
3163 {
3164 
3165 	KASSERT(sc->sc_brtcnt == 0,
3166 	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
3167 	free(sc->sc_rthash, M_DEVBUF);
3168 }
3169 
3170 /*
3171  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3172  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3173  */
3174 #define	mix(a, b, c)							\
3175 do {									\
3176 	a -= b; a -= c; a ^= (c >> 13);					\
3177 	b -= c; b -= a; b ^= (a << 8);					\
3178 	c -= a; c -= b; c ^= (b >> 13);					\
3179 	a -= b; a -= c; a ^= (c >> 12);					\
3180 	b -= c; b -= a; b ^= (a << 16);					\
3181 	c -= a; c -= b; c ^= (b >> 5);					\
3182 	a -= b; a -= c; a ^= (c >> 3);					\
3183 	b -= c; b -= a; b ^= (a << 10);					\
3184 	c -= a; c -= b; c ^= (b >> 15);					\
3185 } while (/*CONSTCOND*/0)
3186 
3187 static __inline uint32_t
3188 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3189 {
3190 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3191 
3192 	b += addr[5] << 8;
3193 	b += addr[4];
3194 	a += addr[3] << 24;
3195 	a += addr[2] << 16;
3196 	a += addr[1] << 8;
3197 	a += addr[0];
3198 
3199 	mix(a, b, c);
3200 
3201 	return (c & BRIDGE_RTHASH_MASK);
3202 }
3203 
3204 #undef mix
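
/*
 * The per-bridge sc_rthash_key, seeded from arc4random() in
 * bridge_rtable_init(), is folded into the hash as the initial value of
 * 'c', so the mapping of MAC addresses to buckets differs from bridge
 * to bridge and is not predictable from the address alone.
 */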
3205 
3206 static int
3207 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3208 {
3209 	int i, d;
3210 
3211 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3212 		d = ((int)a[i]) - ((int)b[i]);
3213 	}
3214 
3215 	return (d);
3216 }
3217 
3218 /*
3219  * bridge_rtnode_lookup:
3220  *
3221  *	Look up a bridge route node for the specified destination.  Compare the
3222  *	vlan id, or if it is zero, just return the first match.
3223  */
3224 static struct bridge_rtnode *
3225 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
3226 {
3227 	struct bridge_rtnode *brt;
3228 	uint32_t hash;
3229 	int dir;
3230 
3231 	BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);
3232 
3233 	hash = bridge_rthash(sc, addr);
3234 	CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
3235 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3236 		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
3237 			return (brt);
3238 		if (dir > 0)
3239 			return (NULL);
3240 	}
3241 
3242 	return (NULL);
3243 }
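
/*
 * The early return when dir > 0 above relies on bridge_rtnode_insert()
 * keeping each hash chain sorted by address, so a lookup can stop as
 * soon as it walks past the point where a matching entry would have to
 * be.
 */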
3244 
3245 /*
3246  * bridge_rtnode_insert:
3247  *
3248  *	Insert the specified bridge node into the route table.  We
3249  *	assume the entry is not already in the table.
3250  */
3251 static int
3252 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3253 {
3254 	struct bridge_rtnode *lbrt;
3255 	uint32_t hash;
3256 	int dir;
3257 
3258 	BRIDGE_RT_LOCK_ASSERT(sc);
3259 
3260 	hash = bridge_rthash(sc, brt->brt_addr);
3261 
3262 	lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
3263 	if (lbrt == NULL) {
3264 		CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
3265 		goto out;
3266 	}
3267 
3268 	do {
3269 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3270 		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
3271 			return (EEXIST);
3272 		if (dir > 0) {
3273 			CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3274 			goto out;
3275 		}
3276 		if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
3277 			CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3278 			goto out;
3279 		}
3280 		lbrt = CK_LIST_NEXT(lbrt, brt_hash);
3281 	} while (lbrt != NULL);
3282 
3283 #ifdef DIAGNOSTIC
3284 	panic("bridge_rtnode_insert: impossible");
3285 #endif
3286 
3287 out:
3288 	CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3289 	sc->sc_brtcnt++;
3290 
3291 	return (0);
3292 }
3293 
3294 static void
3295 bridge_rtnode_destroy_cb(struct epoch_context *ctx)
3296 {
3297 	struct bridge_rtnode *brt;
3298 
3299 	brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
3300 
3301 	CURVNET_SET(brt->brt_vnet);
3302 	uma_zfree(V_bridge_rtnode_zone, brt);
3303 	CURVNET_RESTORE();
3304 }
3305 
3306 /*
3307  * bridge_rtnode_destroy:
3308  *
3309  *	Destroy a bridge rtnode.
3310  */
3311 static void
3312 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3313 {
3314 	BRIDGE_RT_LOCK_ASSERT(sc);
3315 
3316 	CK_LIST_REMOVE(brt, brt_hash);
3317 
3318 	CK_LIST_REMOVE(brt, brt_list);
3319 	sc->sc_brtcnt--;
3320 	brt->brt_dst->bif_addrcnt--;
3321 
3322 	NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
3323 }
3324 
3325 /*
3326  * bridge_rtable_expire:
3327  *
3328  *	Set the expiry time for all routes on an interface.
3329  */
3330 static void
3331 bridge_rtable_expire(struct ifnet *ifp, int age)
3332 {
3333 	struct bridge_softc *sc = ifp->if_bridge;
3334 	struct bridge_rtnode *brt;
3335 
3336 	CURVNET_SET(ifp->if_vnet);
3337 	BRIDGE_RT_LOCK(sc);
3338 
3339 	/*
3340 	 * If the age is zero then flush; otherwise set all the expiry times to
3341 	 * 'age' for the interface.
3342 	 */
3343 	if (age == 0)
3344 		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3345 	else {
3346 		CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3347 			/* Cap the expiry time to 'age' */
3348 			if (brt->brt_ifp == ifp &&
3349 			    brt->brt_expire > time_uptime + age &&
3350 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3351 				brt->brt_expire = time_uptime + age;
3352 		}
3353 	}
3354 	BRIDGE_RT_UNLOCK(sc);
3355 	CURVNET_RESTORE();
3356 }
3357 
3358 /*
3359  * bridge_state_change:
3360  *
3361  *	Callback from the bridgestp code when a port changes states.
3362  */
3363 static void
3364 bridge_state_change(struct ifnet *ifp, int state)
3365 {
3366 	struct bridge_softc *sc = ifp->if_bridge;
3367 	static const char *stpstates[] = {
3368 		"disabled",
3369 		"listening",
3370 		"learning",
3371 		"forwarding",
3372 		"blocking",
3373 		"discarding"
3374 	};
3375 
3376 	CURVNET_SET(ifp->if_vnet);
3377 	if (V_log_stp)
3378 		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
3379 		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
3380 	CURVNET_RESTORE();
3381 }
3382 
3383 /*
3384  * Send bridge packets through pfil if they are one of the types pfil can deal
3385  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3386  * question.)  If bifp or ifp is NULL, then packet filtering is skipped for
3387  * that interface.
3388  */
3389 static int
3390 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3391 {
3392 	int snap, error, i, hlen;
3393 	struct ether_header *eh1, eh2;
3394 	struct ip *ip;
3395 	struct llc llc1;
3396 	u_int16_t ether_type;
3397 	pfil_return_t rv;
3398 
3399 	snap = 0;
3400 	error = -1;	/* Default to an error unless explicitly set to 0 below */
3401 
3402 #if 0
3403 	/* we may return with the IP fields swapped, ensure it's not shared */
3404 	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
3405 #endif
3406 
3407 	if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
3408 		return (0); /* filtering is disabled */
3409 
3410 	i = min((*mp)->m_pkthdr.len, max_protohdr);
3411 	if ((*mp)->m_len < i) {
3412 	    *mp = m_pullup(*mp, i);
3413 	    if (*mp == NULL) {
3414 		printf("%s: m_pullup failed\n", __func__);
3415 		return (-1);
3416 	    }
3417 	}
3418 
3419 	eh1 = mtod(*mp, struct ether_header *);
3420 	ether_type = ntohs(eh1->ether_type);
3421 
3422 	/*
3423 	 * Check for SNAP/LLC.
3424 	 */
3425 	if (ether_type < ETHERMTU) {
3426 		struct llc *llc2 = (struct llc *)(eh1 + 1);
3427 
3428 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3429 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
3430 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
3431 		    llc2->llc_control == LLC_UI) {
3432 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
3433 			snap = 1;
3434 		}
3435 	}
3436 
3437 	/*
3438 	 * If we're trying to filter bridge traffic, don't look at anything
3439 	 * other than IP and ARP traffic.  If the filter doesn't understand
3440 	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
3441 	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
3442 	 * but of course we don't have an AppleTalk filter to begin with.
3443 	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
3444 	 * ARP traffic.)
3445 	 */
3446 	switch (ether_type) {
3447 		case ETHERTYPE_ARP:
3448 		case ETHERTYPE_REVARP:
3449 			if (V_pfil_ipfw_arp == 0)
3450 				return (0); /* Automatically pass */
3451 			break;
3452 
3453 		case ETHERTYPE_IP:
3454 #ifdef INET6
3455 		case ETHERTYPE_IPV6:
3456 #endif /* INET6 */
3457 			break;
3458 		default:
3459 			/*
3460 			 * Check to see if the user wants to pass non-ip
3461 			 * packets; these would not be checked by pfil(9) and would
3462 			 * be passed unconditionally, so the default is to drop.
3463 			 */
3464 			if (V_pfil_onlyip)
3465 				goto bad;
3466 	}
3467 
3468 	/* Run the packet through pfil before stripping link headers */
3469 	if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
3470 	    dir == PFIL_OUT && ifp != NULL) {
3471 		switch (pfil_mbuf_out(V_link_pfil_head, mp, ifp, NULL)) {
3472 		case PFIL_DROPPED:
3473 			return (EACCES);
3474 		case PFIL_CONSUMED:
3475 			return (0);
3476 		}
3477 	}
3478 
3479 	/* Strip off the Ethernet header and keep a copy. */
3480 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3481 	m_adj(*mp, ETHER_HDR_LEN);
3482 
3483 	/* Strip off snap header, if present */
3484 	if (snap) {
3485 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3486 		m_adj(*mp, sizeof(struct llc));
3487 	}
3488 
3489 	/*
3490 	 * Check the IP header for alignment and errors
3491 	 */
3492 	if (dir == PFIL_IN) {
3493 		switch (ether_type) {
3494 			case ETHERTYPE_IP:
3495 				error = bridge_ip_checkbasic(mp);
3496 				break;
3497 #ifdef INET6
3498 			case ETHERTYPE_IPV6:
3499 				error = bridge_ip6_checkbasic(mp);
3500 				break;
3501 #endif /* INET6 */
3502 			default:
3503 				error = 0;
3504 		}
3505 		if (error)
3506 			goto bad;
3507 	}
3508 
3509 	error = 0;
3510 
3511 	/*
3512 	 * Run the packet through pfil
3513 	 */
3514 	rv = PFIL_PASS;
3515 	switch (ether_type) {
3516 	case ETHERTYPE_IP:
3517 		/*
3518 		 * Run pfil on the member interface and the bridge, both can
3519 		 * be skipped by clearing pfil_member or pfil_bridge.
3520 		 *
3521 		 * Keep the order:
3522 		 *   in_if -> bridge_if -> out_if
3523 		 */
3524 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3525 		    pfil_mbuf_out(V_inet_pfil_head, mp, bifp, NULL)) !=
3526 		    PFIL_PASS)
3527 			break;
3528 
3529 		if (V_pfil_member && ifp != NULL) {
3530 			rv = (dir == PFIL_OUT) ?
3531 			    pfil_mbuf_out(V_inet_pfil_head, mp, ifp, NULL) :
3532 			    pfil_mbuf_in(V_inet_pfil_head, mp, ifp, NULL);
3533 			if (rv != PFIL_PASS)
3534 				break;
3535 		}
3536 
3537 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3538 		    pfil_mbuf_in(V_inet_pfil_head, mp, bifp, NULL)) !=
3539 		    PFIL_PASS)
3540 			break;
3541 
3542 		/* Check if we need to fragment the packet.  bridge_fragment()
3543 		 * generates an mbuf chain of packets that already include the
3544 		 * Ethernet headers. */
3545 		if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
3546 			i = (*mp)->m_pkthdr.len;
3547 			if (i > ifp->if_mtu) {
3548 				error = bridge_fragment(ifp, mp, &eh2, snap,
3549 					    &llc1);
3550 				return (error);
3551 			}
3552 		}
3553 
3554 		/* Recalculate the ip checksum. */
3555 		ip = mtod(*mp, struct ip *);
3556 		hlen = ip->ip_hl << 2;
3557 		if (hlen < sizeof(struct ip))
3558 			goto bad;
3559 		if (hlen > (*mp)->m_len) {
3560 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3561 				goto bad;
3562 			ip = mtod(*mp, struct ip *);
3563 			if (ip == NULL)
3564 				goto bad;
3565 		}
3566 		ip->ip_sum = 0;
3567 		if (hlen == sizeof(struct ip))
3568 			ip->ip_sum = in_cksum_hdr(ip);
3569 		else
3570 			ip->ip_sum = in_cksum(*mp, hlen);
3571 
3572 		break;
3573 #ifdef INET6
3574 	case ETHERTYPE_IPV6:
3575 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3576 		    pfil_mbuf_out(V_inet6_pfil_head, mp, bifp, NULL)) !=
3577 		    PFIL_PASS)
3578 			break;
3579 
3580 		if (V_pfil_member && ifp != NULL) {
3581 			rv = (dir == PFIL_OUT) ?
3582 			    pfil_mbuf_out(V_inet6_pfil_head, mp, ifp, NULL) :
3583 			    pfil_mbuf_in(V_inet6_pfil_head, mp, ifp, NULL);
3584 			if (rv != PFIL_PASS)
3585 				break;
3586 		}
3587 
3588 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3589 		    pfil_mbuf_in(V_inet6_pfil_head, mp, bifp, NULL)) !=
3590 		    PFIL_PASS)
3591 			break;
3592 		break;
3593 #endif
3594 	}
3595 
3596 	switch (rv) {
3597 	case PFIL_CONSUMED:
3598 		return (0);
3599 	case PFIL_DROPPED:
3600 		return (EACCES);
3601 	default:
3602 		break;
3603 	}
3604 
3605 	error = -1;
3606 
3607 	/*
3608 	 * Finally, put everything back the way it was and return
3609 	 */
3610 	if (snap) {
3611 		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
3612 		if (*mp == NULL)
3613 			return (error);
3614 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3615 	}
3616 
3617 	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
3618 	if (*mp == NULL)
3619 		return (error);
3620 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3621 
3622 	return (0);
3623 
3624 bad:
3625 	m_freem(*mp);
3626 	*mp = NULL;
3627 	return (error);
3628 }
3629 
3630 /*
3631  * Perform basic checks on header size since
3632  * pfil assumes ip_input has already processed
3633  * it for it.  Cut-and-pasted from ip_input.c.
3634  * it on its behalf.  Cut-and-pasted from ip_input.c.
3635  * does the IPv4 version really need to be
3636  * this complicated?
3637  *
3638  * XXX Should we update ipstat here, or not?
3639  * XXX Right now we update ipstat but not
3640  * XXX csum_counter.
3641  */
3642 static int
3643 bridge_ip_checkbasic(struct mbuf **mp)
3644 {
3645 	struct mbuf *m = *mp;
3646 	struct ip *ip;
3647 	int len, hlen;
3648 	u_short sum;
3649 
3650 	if (*mp == NULL)
3651 		return (-1);
3652 
3653 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3654 		if ((m = m_copyup(m, sizeof(struct ip),
3655 			(max_linkhdr + 3) & ~3)) == NULL) {
3656 			/* XXXJRT new stat, please */
3657 			KMOD_IPSTAT_INC(ips_toosmall);
3658 			goto bad;
3659 		}
3660 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3661 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3662 			KMOD_IPSTAT_INC(ips_toosmall);
3663 			goto bad;
3664 		}
3665 	}
3666 	ip = mtod(m, struct ip *);
3667 	if (ip == NULL) goto bad;
3668 
3669 	if (ip->ip_v != IPVERSION) {
3670 		KMOD_IPSTAT_INC(ips_badvers);
3671 		goto bad;
3672 	}
3673 	hlen = ip->ip_hl << 2;
3674 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3675 		KMOD_IPSTAT_INC(ips_badhlen);
3676 		goto bad;
3677 	}
3678 	if (hlen > m->m_len) {
3679 		if ((m = m_pullup(m, hlen)) == NULL) {
3680 			KMOD_IPSTAT_INC(ips_badhlen);
3681 			goto bad;
3682 		}
3683 		ip = mtod(m, struct ip *);
3684 		if (ip == NULL) goto bad;
3685 	}
3686 
3687 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3688 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3689 	} else {
3690 		if (hlen == sizeof(struct ip)) {
3691 			sum = in_cksum_hdr(ip);
3692 		} else {
3693 			sum = in_cksum(m, hlen);
3694 		}
3695 	}
3696 	if (sum) {
3697 		KMOD_IPSTAT_INC(ips_badsum);
3698 		goto bad;
3699 	}
3700 
3701 	/* Retrieve the packet length. */
3702 	len = ntohs(ip->ip_len);
3703 
3704 	/*
3705 	 * Check for additional length bogosity
3706 	 */
3707 	if (len < hlen) {
3708 		KMOD_IPSTAT_INC(ips_badlen);
3709 		goto bad;
3710 	}
3711 
3712 	/*
3713 	 * Check that the amount of data in the buffers
3714 	 * is at least as much as the IP header would have us expect.
3715 	 * Drop packet if shorter than we expect.
3716 	 */
3717 	if (m->m_pkthdr.len < len) {
3718 		KMOD_IPSTAT_INC(ips_tooshort);
3719 		goto bad;
3720 	}
3721 
3722 	/* Checks out, proceed */
3723 	*mp = m;
3724 	return (0);
3725 
3726 bad:
3727 	*mp = m;
3728 	return (-1);
3729 }
3730 
3731 #ifdef INET6
3732 /*
3733  * Same as above, but for IPv6.
3734  * Cut-and-pasted from ip6_input.c.
3735  * XXX Should we update ip6stat, or not?
3736  */
3737 static int
3738 bridge_ip6_checkbasic(struct mbuf **mp)
3739 {
3740 	struct mbuf *m = *mp;
3741 	struct ip6_hdr *ip6;
3742 
3743 	/*
3744 	 * If the IPv6 header is not aligned, slurp it up into a new
3745 	 * mbuf with space for link headers, in the event we forward
3746 	 * it.  Otherwise, if it is aligned, make sure the entire base
3747 	 * IPv6 header is in the first mbuf of the chain.
3748 	 */
3749 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3750 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3751 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3752 			    (max_linkhdr + 3) & ~3)) == NULL) {
3753 			/* XXXJRT new stat, please */
3754 			IP6STAT_INC(ip6s_toosmall);
3755 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3756 			goto bad;
3757 		}
3758 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3759 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3760 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3761 			IP6STAT_INC(ip6s_toosmall);
3762 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3763 			goto bad;
3764 		}
3765 	}
3766 
3767 	ip6 = mtod(m, struct ip6_hdr *);
3768 
3769 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3770 		IP6STAT_INC(ip6s_badvers);
3771 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3772 		goto bad;
3773 	}
3774 
3775 	/* Checks out, proceed */
3776 	*mp = m;
3777 	return (0);
3778 
3779 bad:
3780 	*mp = m;
3781 	return (-1);
3782 }
3783 #endif /* INET6 */
3784 
3785 /*
3786  * bridge_fragment:
3787  *
3788  *	Fragment an mbuf chain into multiple packets and prepend the Ethernet header to each.
3789  */
3790 static int
3791 bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
3792     int snap, struct llc *llc)
3793 {
3794 	struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
3795 	struct ip *ip;
3796 	int error = -1;
3797 
3798 	if (m->m_len < sizeof(struct ip) &&
3799 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3800 		goto dropit;
3801 	ip = mtod(m, struct ip *);
3802 
3803 	m->m_pkthdr.csum_flags |= CSUM_IP;
3804 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
3805 	if (error)
3806 		goto dropit;
3807 
3808 	/*
3809 	 * Walk the chain and re-add the Ethernet header for
3810 	 * each mbuf packet.
3811 	 */
3812 	for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
3813 		nextpkt = mcur->m_nextpkt;
3814 		mcur->m_nextpkt = NULL;
3815 		if (snap) {
3816 			M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
3817 			if (mcur == NULL) {
3818 				error = ENOBUFS;
3819 				if (mprev != NULL)
3820 					mprev->m_nextpkt = nextpkt;
3821 				goto dropit;
3822 			}
3823 			bcopy(llc, mtod(mcur, caddr_t),sizeof(struct llc));
3824 		}
3825 
3826 		M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
3827 		if (mcur == NULL) {
3828 			error = ENOBUFS;
3829 			if (mprev != NULL)
3830 				mprev->m_nextpkt = nextpkt;
3831 			goto dropit;
3832 		}
3833 		bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);
3834 
3835 		/*
3836 		 * The previous two M_PREPEND could have inserted one or two
3837 		 * mbufs in front so we have to update the previous packet's
3838 		 * m_nextpkt.
3839 		 */
3840 		mcur->m_nextpkt = nextpkt;
3841 		if (mprev != NULL)
3842 			mprev->m_nextpkt = mcur;
3843 		else {
3844 			/* The first mbuf in the original chain needs to be
3845 			 * updated. */
3846 			*mp = mcur;
3847 		}
3848 		mprev = mcur;
3849 	}
3850 
3851 	KMOD_IPSTAT_INC(ips_fragmented);
3852 	return (error);
3853 
3854 dropit:
3855 	for (mcur = *mp; mcur; mcur = m) { /* dropping the full packet chain */
3856 		m = mcur->m_nextpkt;
3857 		m_freem(mcur);
3858 	}
3859 	return (error);
3860 }
3861 
3862 static void
3863 bridge_linkstate(struct ifnet *ifp)
3864 {
3865 	struct bridge_softc *sc = ifp->if_bridge;
3866 	struct bridge_iflist *bif;
3867 	struct epoch_tracker et;
3868 
3869 	NET_EPOCH_ENTER(et);
3870 
3871 	bif = bridge_lookup_member_if(sc, ifp);
3872 	if (bif == NULL) {
3873 		NET_EPOCH_EXIT(et);
3874 		return;
3875 	}
3876 	bridge_linkcheck(sc);
3877 
3878 	bstp_linkstate(&bif->bif_stp);
3879 
3880 	NET_EPOCH_EXIT(et);
3881 }
3882 
3883 static void
3884 bridge_linkcheck(struct bridge_softc *sc)
3885 {
3886 	struct bridge_iflist *bif;
3887 	int new_link, hasls;
3888 
3889 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
3890 
3891 	new_link = LINK_STATE_DOWN;
3892 	hasls = 0;
3893 	/* Our link is considered up if at least one of our ports is active */
3894 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
3895 		if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
3896 			hasls++;
3897 		if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
3898 			new_link = LINK_STATE_UP;
3899 			break;
3900 		}
3901 	}
3902 	if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
3903 		/* If no interfaces support link-state then we default to up */
3904 		new_link = LINK_STATE_UP;
3905 	}
3906 	if_link_state_change(sc->sc_ifp, new_link);
3907 }
3908