xref: /freebsd/sys/net/if_bridge.c (revision 96f830456fd449c4cb5a7df8a2f6c3c96993b43e)
1 /*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * Copyright 2001 Wasabi Systems, Inc.
7  * All rights reserved.
8  *
9  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed for the NetBSD Project by
22  *	Wasabi Systems, Inc.
23  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
24  *    or promote products derived from this software without specific prior
25  *    written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
28  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
42  * All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
55  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
56  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
57  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
61  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
62  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63  * POSSIBILITY OF SUCH DAMAGE.
64  *
65  * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
66  */
67 
68 /*
69  * Network interface bridge support.
70  *
71  * TODO:
72  *
73  *	- Currently only supports Ethernet-like interfaces (Ethernet,
74  *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
75  *	  to bridge other types of interfaces (maybe consider
76  *	  heterogeneous bridges).
77  */
78 
79 #include <sys/cdefs.h>
80 #include "opt_inet.h"
81 #include "opt_inet6.h"
82 
83 #include <sys/param.h>
84 #include <sys/eventhandler.h>
85 #include <sys/mbuf.h>
86 #include <sys/malloc.h>
87 #include <sys/protosw.h>
88 #include <sys/systm.h>
89 #include <sys/jail.h>
90 #include <sys/time.h>
91 #include <sys/socket.h> /* for net/if.h */
92 #include <sys/sockio.h>
93 #include <sys/ctype.h>  /* string functions */
94 #include <sys/kernel.h>
95 #include <sys/random.h>
96 #include <sys/syslog.h>
97 #include <sys/sysctl.h>
98 #include <vm/uma.h>
99 #include <sys/module.h>
100 #include <sys/priv.h>
101 #include <sys/proc.h>
102 #include <sys/lock.h>
103 #include <sys/mutex.h>
104 
105 #include <net/bpf.h>
106 #include <net/if.h>
107 #include <net/if_clone.h>
108 #include <net/if_dl.h>
109 #include <net/if_types.h>
110 #include <net/if_var.h>
111 #include <net/if_private.h>
112 #include <net/pfil.h>
113 #include <net/vnet.h>
114 
115 #include <netinet/in.h>
116 #include <netinet/in_systm.h>
117 #include <netinet/in_var.h>
118 #include <netinet/ip.h>
119 #include <netinet/ip_var.h>
120 #ifdef INET6
121 #include <netinet/ip6.h>
122 #include <netinet6/ip6_var.h>
123 #include <netinet6/in6_ifattach.h>
124 #endif
125 #if defined(INET) || defined(INET6)
126 #include <netinet/ip_carp.h>
127 #endif
128 #include <machine/in_cksum.h>
129 #include <netinet/if_ether.h>
130 #include <net/bridgestp.h>
131 #include <net/if_bridgevar.h>
132 #include <net/if_llc.h>
133 #include <net/if_vlan_var.h>
134 
135 #include <net/route.h>
136 
137 /*
138  * At various points in the code we need to know if we're hooked into the INET
139  * and/or INET6 pfil.  Define some macros to do that based on which IP versions
140  * are enabled in the kernel.  This avoids littering the rest of the code with
141  * #ifdef INET6 to avoid referencing V_inet6_pfil_head.
142  */
143 #ifdef INET6
144 #define		PFIL_HOOKED_IN_INET6	PFIL_HOOKED_IN(V_inet6_pfil_head)
145 #define		PFIL_HOOKED_OUT_INET6	PFIL_HOOKED_OUT(V_inet6_pfil_head)
146 #else
147 #define		PFIL_HOOKED_IN_INET6	false
148 #define		PFIL_HOOKED_OUT_INET6	false
149 #endif
150 
151 #ifdef INET
152 #define		PFIL_HOOKED_IN_INET	PFIL_HOOKED_IN(V_inet_pfil_head)
153 #define		PFIL_HOOKED_OUT_INET	PFIL_HOOKED_OUT(V_inet_pfil_head)
154 #else
155 #define		PFIL_HOOKED_IN_INET	false
156 #define		PFIL_HOOKED_OUT_INET	false
157 #endif
158 
159 #define		PFIL_HOOKED_IN_46	(PFIL_HOOKED_IN_INET6 || PFIL_HOOKED_IN_INET)
160 #define		PFIL_HOOKED_OUT_46	(PFIL_HOOKED_OUT_INET6 || PFIL_HOOKED_OUT_INET)
161 
162 /*
163  * Size of the route hash table.  Must be a power of two.
164  */
165 #ifndef BRIDGE_RTHASH_SIZE
166 #define	BRIDGE_RTHASH_SIZE		1024
167 #endif
168 
169 #define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)
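/*
 * Editorial note (not from the original source): because the table size
 * is a power of two, a bucket index is obtained by masking the address
 * hash rather than taking a modulo, roughly:
 *
 *	idx = hash_of(addr) & BRIDGE_RTHASH_MASK;
 *
 * where hash_of() stands in for the file's address hash routine.
 */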
170 
171 /*
172  * Default maximum number of addresses to cache.
173  */
174 #ifndef BRIDGE_RTABLE_MAX
175 #define	BRIDGE_RTABLE_MAX		2000
176 #endif
177 
178 /*
179  * Timeout (in seconds) for entries learned dynamically.
180  */
181 #ifndef BRIDGE_RTABLE_TIMEOUT
182 #define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
183 #endif
184 
185 /*
186  * Number of seconds between walks of the route list.
187  */
188 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
189 #define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
190 #endif
191 
192 /*
193  * List of capabilities to possibly mask on the member interface.
194  */
195 #define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
196 					 IFCAP_TXCSUM_IPV6|IFCAP_MEXTPG)
197 
198 /*
199  * List of capabilities to strip
200  */
201 #define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO
202 
203 /*
204  * Bridge locking
205  *
206  * The bridge relies heavily on the epoch(9) system to protect its data
207  * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
208  * must ensure there is only one writer at a time.
209  *
210  * That is: for read accesses we only need to be in NET_EPOCH, but for write
211  * accesses we must hold:
212  *
213  *  - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
214  *  - BRIDGE_LOCK, for any other change
215  *
216  * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
217  * calls to bridge member interfaces and these ioctl()s can sleep.
218  * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
219  * required while we're in NET_EPOCH and then we're not allowed to sleep.
220  */
221 #define BRIDGE_LOCK_INIT(_sc)		do {			\
222 	sx_init(&(_sc)->sc_sx, "if_bridge");			\
223 	mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF);	\
224 } while (0)
225 #define BRIDGE_LOCK_DESTROY(_sc)	do {	\
226 	sx_destroy(&(_sc)->sc_sx);		\
227 	mtx_destroy(&(_sc)->sc_rt_mtx);		\
228 } while (0)
229 #define BRIDGE_LOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
230 #define BRIDGE_UNLOCK(_sc)		sx_xunlock(&(_sc)->sc_sx)
231 #define BRIDGE_LOCK_ASSERT(_sc)		sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
232 #define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
233 	    MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
234 #define BRIDGE_UNLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
235 #define BRIDGE_RT_LOCK(_sc)		mtx_lock(&(_sc)->sc_rt_mtx)
236 #define BRIDGE_RT_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_rt_mtx)
237 #define BRIDGE_RT_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
238 #define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc)	\
239 	    MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
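/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): typical reader vs. writer use of the locks defined above.
 * The function names are hypothetical; real callers appear throughout
 * this file.
 *
 *	static void
 *	example_read_members(struct bridge_softc *sc)
 *	{
 *		struct epoch_tracker et;
 *		struct bridge_iflist *bif;
 *
 *		NET_EPOCH_ENTER(et);		read side: epoch only
 *		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
 *			(void)bif;		inspect members safely
 *		NET_EPOCH_EXIT(et);
 *	}
 *
 *	static void
 *	example_modify_members(struct bridge_softc *sc)
 *	{
 *		BRIDGE_LOCK(sc);		write side: exclusive sx
 *		...add or remove bridge_iflist entries...
 *		BRIDGE_UNLOCK(sc);
 *	}
 */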
240 
241 struct bridge_softc;
242 
243 /*
244  * Bridge interface list entry.
245  */
246 struct bridge_iflist {
247 	CK_LIST_ENTRY(bridge_iflist) bif_next;
248 	struct ifnet		*bif_ifp;	/* member if */
249 	struct bridge_softc	*bif_sc;	/* parent bridge */
250 	struct bstp_port	bif_stp;	/* STP state */
251 	uint32_t		bif_flags;	/* member if flags */
252 	int			bif_savedcaps;	/* saved capabilities */
253 	uint32_t		bif_addrmax;	/* max # of addresses */
254 	uint32_t		bif_addrcnt;	/* cur. # of addresses */
255 	uint32_t		bif_addrexceeded;/* # of address violations */
256 	struct epoch_context	bif_epoch_ctx;
257 };
258 
259 /*
260  * Bridge route node.
261  */
262 struct bridge_rtnode {
263 	CK_LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
264 	CK_LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
265 	struct bridge_iflist	*brt_dst;	/* destination if */
266 	unsigned long		brt_expire;	/* expiration time */
267 	uint8_t			brt_flags;	/* address flags */
268 	uint8_t			brt_addr[ETHER_ADDR_LEN];
269 	ether_vlanid_t		brt_vlan;	/* vlan id */
270 	struct	vnet		*brt_vnet;
271 	struct	epoch_context	brt_epoch_ctx;
272 };
273 #define	brt_ifp			brt_dst->bif_ifp
274 
275 /*
276  * Software state for each bridge.
277  */
278 struct bridge_softc {
279 	struct ifnet		*sc_ifp;	/* make this an interface */
280 	LIST_ENTRY(bridge_softc) sc_list;
281 	struct sx		sc_sx;
282 	struct mtx		sc_rt_mtx;
283 	uint32_t		sc_brtmax;	/* max # of addresses */
284 	uint32_t		sc_brtcnt;	/* cur. # of addresses */
285 	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
286 	struct callout		sc_brcallout;	/* bridge callout */
287 	CK_LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
288 	CK_LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
289 	CK_LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
290 	uint32_t		sc_rthash_key;	/* key for hash */
291 	CK_LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
292 	struct bstp_state	sc_stp;		/* STP state */
293 	uint32_t		sc_brtexceeded;	/* # of cache drops */
294 	struct ifnet		*sc_ifaddr;	/* member mac copied from */
295 	struct ether_addr	sc_defaddr;	/* Default MAC address */
296 	if_input_fn_t		sc_if_input;	/* Saved copy of if_input */
297 	struct epoch_context	sc_epoch_ctx;
298 };
299 
300 VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
301 #define	V_bridge_list_sx	VNET(bridge_list_sx)
302 static eventhandler_tag bridge_detach_cookie;
303 
304 int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
305 
306 VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
307 #define	V_bridge_rtnode_zone	VNET(bridge_rtnode_zone)
308 
309 static int	bridge_clone_create(struct if_clone *, char *, size_t,
310 		    struct ifc_data *, struct ifnet **);
311 static int	bridge_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
312 
313 static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
314 static void	bridge_mutecaps(struct bridge_softc *);
315 static void	bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
316 		    int);
317 static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
318 static void	bridge_init(void *);
319 static void	bridge_dummynet(struct mbuf *, struct ifnet *);
320 static bool	bridge_same(const void *, const void *);
321 static void	*bridge_get_softc(struct ifnet *);
322 static void	bridge_stop(struct ifnet *, int);
323 static int	bridge_transmit(struct ifnet *, struct mbuf *);
324 #ifdef ALTQ
325 static void	bridge_altq_start(if_t);
326 static int	bridge_altq_transmit(if_t, struct mbuf *);
327 #endif
328 static void	bridge_qflush(struct ifnet *);
329 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
330 static void	bridge_inject(struct ifnet *, struct mbuf *);
331 static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
332 		    struct rtentry *);
333 static int	bridge_enqueue(struct bridge_softc *, struct ifnet *,
334 		    struct mbuf *);
335 static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
336 
337 static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
338 		    struct mbuf *m);
339 
340 static void	bridge_timer(void *);
341 
342 static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
343 		    struct mbuf *, int);
344 static void	bridge_span(struct bridge_softc *, struct mbuf *);
345 
346 static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
347 		    ether_vlanid_t, struct bridge_iflist *, int, uint8_t);
348 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
349 		    ether_vlanid_t);
350 static void	bridge_rttrim(struct bridge_softc *);
351 static void	bridge_rtage(struct bridge_softc *);
352 static void	bridge_rtflush(struct bridge_softc *, int);
353 static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
354 		    ether_vlanid_t);
355 
356 static void	bridge_rtable_init(struct bridge_softc *);
357 static void	bridge_rtable_fini(struct bridge_softc *);
358 
359 static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
360 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
361 		    const uint8_t *, ether_vlanid_t);
362 static int	bridge_rtnode_insert(struct bridge_softc *,
363 		    struct bridge_rtnode *);
364 static void	bridge_rtnode_destroy(struct bridge_softc *,
365 		    struct bridge_rtnode *);
366 static void	bridge_rtable_expire(struct ifnet *, int);
367 static void	bridge_state_change(struct ifnet *, int);
368 
369 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
370 		    const char *name);
371 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
372 		    struct ifnet *ifp);
373 static void	bridge_delete_member(struct bridge_softc *,
374 		    struct bridge_iflist *, int);
375 static void	bridge_delete_span(struct bridge_softc *,
376 		    struct bridge_iflist *);
377 
378 static int	bridge_ioctl_add(struct bridge_softc *, void *);
379 static int	bridge_ioctl_del(struct bridge_softc *, void *);
380 static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
381 static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
382 static int	bridge_ioctl_scache(struct bridge_softc *, void *);
383 static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
384 static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
385 static int	bridge_ioctl_rts(struct bridge_softc *, void *);
386 static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
387 static int	bridge_ioctl_sto(struct bridge_softc *, void *);
388 static int	bridge_ioctl_gto(struct bridge_softc *, void *);
389 static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
390 static int	bridge_ioctl_flush(struct bridge_softc *, void *);
391 static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
392 static int	bridge_ioctl_spri(struct bridge_softc *, void *);
393 static int	bridge_ioctl_ght(struct bridge_softc *, void *);
394 static int	bridge_ioctl_sht(struct bridge_softc *, void *);
395 static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
396 static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
397 static int	bridge_ioctl_gma(struct bridge_softc *, void *);
398 static int	bridge_ioctl_sma(struct bridge_softc *, void *);
399 static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
400 static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
401 static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
402 static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
403 static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
404 static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
405 static int	bridge_ioctl_grte(struct bridge_softc *, void *);
406 static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
407 static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
408 static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
409 static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
410 		    int);
411 #ifdef INET
412 static int	bridge_ip_checkbasic(struct mbuf **mp);
413 static int	bridge_fragment(struct ifnet *, struct mbuf **mp,
414 		    struct ether_header *, int, struct llc *);
415 #endif /* INET */
416 #ifdef INET6
417 static int	bridge_ip6_checkbasic(struct mbuf **mp);
418 #endif /* INET6 */
419 static void	bridge_linkstate(struct ifnet *ifp);
420 static void	bridge_linkcheck(struct bridge_softc *sc);
421 
422 /*
423  * Use the "null" value from IEEE 802.1Q-2014 Table 9-2
424  * to indicate untagged frames.
425  */
426 #define	VLANTAGOF(_m)	\
427     ((_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : DOT1Q_VID_NULL)
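/*
 * Editorial example (not from the original source): an untagged frame
 * (M_VLANTAG clear) yields DOT1Q_VID_NULL here, while a frame whose
 * ether_vtag carries PCP/DEI bits plus VID 100 yields just 100, since
 * EVL_VLANOFTAG() keeps only the 12-bit VLAN ID.
 */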
428 
429 static struct bstp_cb_ops bridge_ops = {
430 	.bcb_state = bridge_state_change,
431 	.bcb_rtage = bridge_rtable_expire
432 };
433 
434 SYSCTL_DECL(_net_link);
435 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
436     "Bridge");
437 
438 /* only pass IP[46] packets when pfil is enabled */
439 VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
440 #define	V_pfil_onlyip	VNET(pfil_onlyip)
441 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
442     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
443     "Only pass IP packets when pfil is enabled");
444 
445 /* run pfil hooks on the bridge interface */
446 VNET_DEFINE_STATIC(int, pfil_bridge) = 0;
447 #define	V_pfil_bridge	VNET(pfil_bridge)
448 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
449     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
450     "Packet filter on the bridge interface");
451 
452 /* layer2 filter with ipfw */
453 VNET_DEFINE_STATIC(int, pfil_ipfw);
454 #define	V_pfil_ipfw	VNET(pfil_ipfw)
455 
456 /* layer2 ARP filter with ipfw */
457 VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
458 #define	V_pfil_ipfw_arp	VNET(pfil_ipfw_arp)
459 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
460     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
461     "Filter ARP packets through IPFW layer2");
462 
463 /* run pfil hooks on the member interface */
464 VNET_DEFINE_STATIC(int, pfil_member) = 0;
465 #define	V_pfil_member	VNET(pfil_member)
466 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
467     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
468     "Packet filter on the member interface");
469 
470 /* run pfil hooks on the physical interface for locally destined packets */
471 VNET_DEFINE_STATIC(int, pfil_local_phys);
472 #define	V_pfil_local_phys	VNET(pfil_local_phys)
473 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
474     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
475     "Packet filter on the physical interface for locally destined packets");
476 
477 /* log STP state changes */
478 VNET_DEFINE_STATIC(int, log_stp);
479 #define	V_log_stp	VNET(log_stp)
480 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
481     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
482     "Log STP state changes");
483 
484 /* share MAC with first bridge member */
485 VNET_DEFINE_STATIC(int, bridge_inherit_mac);
486 #define	V_bridge_inherit_mac	VNET(bridge_inherit_mac)
487 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
488     CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
489     "Inherit MAC address from the first bridge member");
490 
491 VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
492 #define	V_allow_llz_overlap	VNET(allow_llz_overlap)
493 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
494     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
495     "Allow overlap of link-local scope "
496     "zones of a bridge interface and the member interfaces");
497 
498 /* log MAC address port flapping */
499 VNET_DEFINE_STATIC(bool, log_mac_flap) = true;
500 #define	V_log_mac_flap	VNET(log_mac_flap)
501 SYSCTL_BOOL(_net_link_bridge, OID_AUTO, log_mac_flap,
502     CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(log_mac_flap), true,
503     "Log MAC address port flapping");
504 
505 VNET_DEFINE_STATIC(int, log_interval) = 5;
506 VNET_DEFINE_STATIC(int, log_count) = 0;
507 VNET_DEFINE_STATIC(struct timeval, log_last) = { 0 };
508 
509 #define	V_log_interval	VNET(log_interval)
510 #define	V_log_count	VNET(log_count)
511 #define	V_log_last	VNET(log_last)
512 
513 struct bridge_control {
514 	int	(*bc_func)(struct bridge_softc *, void *);
515 	int	bc_argsize;
516 	int	bc_flags;
517 };
518 
519 #define	BC_F_COPYIN		0x01	/* copy arguments in */
520 #define	BC_F_COPYOUT		0x02	/* copy arguments out */
521 #define	BC_F_SUSER		0x04	/* do super-user check */
522 
523 static const struct bridge_control bridge_control_table[] = {
524 	{ bridge_ioctl_add,		sizeof(struct ifbreq),
525 	  BC_F_COPYIN|BC_F_SUSER },
526 	{ bridge_ioctl_del,		sizeof(struct ifbreq),
527 	  BC_F_COPYIN|BC_F_SUSER },
528 
529 	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
530 	  BC_F_COPYIN|BC_F_COPYOUT },
531 	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
532 	  BC_F_COPYIN|BC_F_SUSER },
533 
534 	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
535 	  BC_F_COPYIN|BC_F_SUSER },
536 	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
537 	  BC_F_COPYOUT },
538 
539 	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
540 	  BC_F_COPYIN|BC_F_COPYOUT },
541 	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
542 	  BC_F_COPYIN|BC_F_COPYOUT },
543 
544 	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
545 	  BC_F_COPYIN|BC_F_SUSER },
546 
547 	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
548 	  BC_F_COPYIN|BC_F_SUSER },
549 	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
550 	  BC_F_COPYOUT },
551 
552 	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
553 	  BC_F_COPYIN|BC_F_SUSER },
554 
555 	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
556 	  BC_F_COPYIN|BC_F_SUSER },
557 
558 	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
559 	  BC_F_COPYOUT },
560 	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
561 	  BC_F_COPYIN|BC_F_SUSER },
562 
563 	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
564 	  BC_F_COPYOUT },
565 	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
566 	  BC_F_COPYIN|BC_F_SUSER },
567 
568 	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
569 	  BC_F_COPYOUT },
570 	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
571 	  BC_F_COPYIN|BC_F_SUSER },
572 
573 	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
574 	  BC_F_COPYOUT },
575 	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
576 	  BC_F_COPYIN|BC_F_SUSER },
577 
578 	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
579 	  BC_F_COPYIN|BC_F_SUSER },
580 
581 	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
582 	  BC_F_COPYIN|BC_F_SUSER },
583 
584 	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
585 	  BC_F_COPYIN|BC_F_SUSER },
586 	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
587 	  BC_F_COPYIN|BC_F_SUSER },
588 
589 	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
590 	  BC_F_COPYOUT },
591 
592 	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
593 	  BC_F_COPYOUT },
594 
595 	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
596 	  BC_F_COPYIN|BC_F_COPYOUT },
597 
598 	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
599 	  BC_F_COPYIN|BC_F_SUSER },
600 
601 	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
602 	  BC_F_COPYIN|BC_F_SUSER },
603 
604 	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
605 	  BC_F_COPYIN|BC_F_SUSER },
606 
607 };
608 static const int bridge_control_table_size = nitems(bridge_control_table);
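/*
 * Illustrative sketch (editorial addition): the table above is indexed
 * by the ifd_cmd of a SIOCSDRVSPEC/SIOCGDRVSPEC request.  Userland
 * (e.g. ifconfig's bridge support) issues such a request roughly like
 * this, with BRDG* command values from net/if_bridgevar.h and "em0"
 * used as a hypothetical member interface:
 *
 *	struct ifbreq req = { 0 };
 *	struct ifdrv ifd = { 0 };
 *
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	ifd.ifd_cmd = BRDGADD;		index 0 -> bridge_ioctl_add()
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 */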
609 
610 VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list) =
611     LIST_HEAD_INITIALIZER();
612 #define	V_bridge_list	VNET(bridge_list)
613 #define	BRIDGE_LIST_LOCK_INIT(x)	sx_init(&V_bridge_list_sx,	\
614 					    "if_bridge list")
615 #define	BRIDGE_LIST_LOCK_DESTROY(x)	sx_destroy(&V_bridge_list_sx)
616 #define	BRIDGE_LIST_LOCK(x)		sx_xlock(&V_bridge_list_sx)
617 #define	BRIDGE_LIST_UNLOCK(x)		sx_xunlock(&V_bridge_list_sx)
618 
619 VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
620 #define	V_bridge_cloner	VNET(bridge_cloner)
621 
622 static const char bridge_name[] = "bridge";
623 
624 static void
625 vnet_bridge_init(const void *unused __unused)
626 {
627 
628 	V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
629 	    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
630 	    UMA_ALIGN_PTR, 0);
631 	BRIDGE_LIST_LOCK_INIT();
632 
633 	struct if_clone_addreq req = {
634 		.create_f = bridge_clone_create,
635 		.destroy_f = bridge_clone_destroy,
636 		.flags = IFC_F_AUTOUNIT,
637 	};
638 	V_bridge_cloner = ifc_attach_cloner(bridge_name, &req);
639 }
640 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
641     vnet_bridge_init, NULL);
642 
643 static void
644 vnet_bridge_uninit(const void *unused __unused)
645 {
646 
647 	ifc_detach_cloner(V_bridge_cloner);
648 	V_bridge_cloner = NULL;
649 	BRIDGE_LIST_LOCK_DESTROY();
650 
651 	/* Callbacks may use the UMA zone. */
652 	NET_EPOCH_DRAIN_CALLBACKS();
653 
654 	uma_zdestroy(V_bridge_rtnode_zone);
655 }
656 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
657     vnet_bridge_uninit, NULL);
658 
659 static int
660 bridge_modevent(module_t mod, int type, void *data)
661 {
662 
663 	switch (type) {
664 	case MOD_LOAD:
665 		bridge_dn_p = bridge_dummynet;
666 		bridge_same_p = bridge_same;
667 		bridge_get_softc_p = bridge_get_softc;
668 		bridge_detach_cookie = EVENTHANDLER_REGISTER(
669 		    ifnet_departure_event, bridge_ifdetach, NULL,
670 		    EVENTHANDLER_PRI_ANY);
671 		break;
672 	case MOD_UNLOAD:
673 		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
674 		    bridge_detach_cookie);
675 		bridge_dn_p = NULL;
676 		bridge_same_p = NULL;
677 		bridge_get_softc_p = NULL;
678 		break;
679 	default:
680 		return (EOPNOTSUPP);
681 	}
682 	return (0);
683 }
684 
685 static moduledata_t bridge_mod = {
686 	"if_bridge",
687 	bridge_modevent,
688 	0
689 };
690 
691 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
692 MODULE_VERSION(if_bridge, 1);
693 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
694 
695 /*
696  * handler for net.link.bridge.ipfw
697  */
698 static int
699 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
700 {
701 	int enable = V_pfil_ipfw;
702 	int error;
703 
704 	error = sysctl_handle_int(oidp, &enable, 0, req);
705 	enable &= 1;
706 
707 	if (enable != V_pfil_ipfw) {
708 		V_pfil_ipfw = enable;
709 
710 		/*
711 		 * Disable pfil so that ipfw doesn't run twice; if the user
712 		 * really wants both then they can re-enable pfil_bridge and/or
713 		 * pfil_member. Also allow non-ip packets as ipfw can filter by
714 		 * layer2 type.
715 		 */
716 		if (V_pfil_ipfw) {
717 			V_pfil_onlyip = 0;
718 			V_pfil_bridge = 0;
719 			V_pfil_member = 0;
720 		}
721 	}
722 
723 	return (error);
724 }
725 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
726     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
727     &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
728     "Layer2 filter with IPFW");
729 
730 #ifdef VIMAGE
731 static void
732 bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
733 {
734 	struct bridge_softc *sc = ifp->if_softc;
735 	struct bridge_iflist *bif;
736 
737 	BRIDGE_LOCK(sc);
738 
739 	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
740 		bridge_delete_member(sc, bif, 0);
741 
742 	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
743 		bridge_delete_span(sc, bif);
744 	}
745 
746 	BRIDGE_UNLOCK(sc);
747 
748 	ether_reassign(ifp, newvnet, arg);
749 }
750 #endif
751 
752 /*
753  * bridge_get_softc:
754  *
755  * Return the bridge softc for an ifnet.
756  */
757 static void *
758 bridge_get_softc(struct ifnet *ifp)
759 {
760 	struct bridge_iflist *bif;
761 
762 	NET_EPOCH_ASSERT();
763 
764 	bif = ifp->if_bridge;
765 	if (bif == NULL)
766 		return (NULL);
767 	return (bif->bif_sc);
768 }
769 
770 /*
771  * bridge_same:
772  *
773  * Return true if two interfaces are in the same bridge.  This is only used by
774  * bridgestp via bridge_same_p.
775  */
776 static bool
777 bridge_same(const void *bifap, const void *bifbp)
778 {
779 	const struct bridge_iflist *bifa = bifap, *bifb = bifbp;
780 
781 	NET_EPOCH_ASSERT();
782 
783 	if (bifa == NULL || bifb == NULL)
784 		return (false);
785 
786 	return (bifa->bif_sc == bifb->bif_sc);
787 }
788 
789 /*
790  * bridge_clone_create:
791  *
792  *	Create a new bridge instance.
793  */
794 static int
795 bridge_clone_create(struct if_clone *ifc, char *name, size_t len,
796     struct ifc_data *ifd, struct ifnet **ifpp)
797 {
798 	struct bridge_softc *sc;
799 	struct ifnet *ifp;
800 
801 	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
802 	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
803 
804 	BRIDGE_LOCK_INIT(sc);
805 	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
806 	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
807 
808 	/* Initialize our routing table. */
809 	bridge_rtable_init(sc);
810 
811 	callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
812 
813 	CK_LIST_INIT(&sc->sc_iflist);
814 	CK_LIST_INIT(&sc->sc_spanlist);
815 
816 	ifp->if_softc = sc;
817 	if_initname(ifp, bridge_name, ifd->unit);
818 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
819 	ifp->if_ioctl = bridge_ioctl;
820 #ifdef ALTQ
821 	ifp->if_start = bridge_altq_start;
822 	ifp->if_transmit = bridge_altq_transmit;
823 	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
824 	ifp->if_snd.ifq_drv_maxlen = 0;
825 	IFQ_SET_READY(&ifp->if_snd);
826 #else
827 	ifp->if_transmit = bridge_transmit;
828 #endif
829 	ifp->if_qflush = bridge_qflush;
830 	ifp->if_init = bridge_init;
831 	ifp->if_type = IFT_BRIDGE;
832 
833 	ether_gen_addr(ifp, &sc->sc_defaddr);
834 
835 	bstp_attach(&sc->sc_stp, &bridge_ops);
836 	ether_ifattach(ifp, sc->sc_defaddr.octet);
837 	/* Now undo some of the damage... */
838 	ifp->if_baudrate = 0;
839 	ifp->if_type = IFT_BRIDGE;
840 #ifdef VIMAGE
841 	ifp->if_reassign = bridge_reassign;
842 #endif
843 	sc->sc_if_input = ifp->if_input;	/* ether_input */
844 	ifp->if_input = bridge_inject;
845 
846 	/*
847 	 * Allow BRIDGE_INPUT() to pass in packets originating from the bridge
848 	 * itself via bridge_inject().  This is required for netmap but
849 	 * otherwise has no effect.
850 	 */
851 	ifp->if_bridge_input = bridge_input;
852 
853 	BRIDGE_LIST_LOCK();
854 	LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
855 	BRIDGE_LIST_UNLOCK();
856 	*ifpp = ifp;
857 
858 	return (0);
859 }
860 
861 static void
862 bridge_clone_destroy_cb(struct epoch_context *ctx)
863 {
864 	struct bridge_softc *sc;
865 
866 	sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
867 
868 	BRIDGE_LOCK_DESTROY(sc);
869 	free(sc, M_DEVBUF);
870 }
871 
872 /*
873  * bridge_clone_destroy:
874  *
875  *	Destroy a bridge instance.
876  */
877 static int
878 bridge_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
879 {
880 	struct bridge_softc *sc = ifp->if_softc;
881 	struct bridge_iflist *bif;
882 	struct epoch_tracker et;
883 
884 	BRIDGE_LOCK(sc);
885 
886 	bridge_stop(ifp, 1);
887 	ifp->if_flags &= ~IFF_UP;
888 
889 	while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
890 		bridge_delete_member(sc, bif, 0);
891 
892 	while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
893 		bridge_delete_span(sc, bif);
894 	}
895 
896 	/* Tear down the routing table. */
897 	bridge_rtable_fini(sc);
898 
899 	BRIDGE_UNLOCK(sc);
900 
901 	NET_EPOCH_ENTER(et);
902 
903 	callout_drain(&sc->sc_brcallout);
904 
905 	BRIDGE_LIST_LOCK();
906 	LIST_REMOVE(sc, sc_list);
907 	BRIDGE_LIST_UNLOCK();
908 
909 	bstp_detach(&sc->sc_stp);
910 #ifdef ALTQ
911 	IFQ_PURGE(&ifp->if_snd);
912 #endif
913 	NET_EPOCH_EXIT(et);
914 
915 	ether_ifdetach(ifp);
916 	if_free(ifp);
917 
918 	NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
919 
920 	return (0);
921 }
922 
923 /*
924  * bridge_ioctl:
925  *
926  *	Handle a control request from the operator.
927  */
928 static int
929 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
930 {
931 	struct bridge_softc *sc = ifp->if_softc;
932 	struct ifreq *ifr = (struct ifreq *)data;
933 	struct bridge_iflist *bif;
934 	struct thread *td = curthread;
935 	union {
936 		struct ifbreq ifbreq;
937 		struct ifbifconf ifbifconf;
938 		struct ifbareq ifbareq;
939 		struct ifbaconf ifbaconf;
940 		struct ifbrparam ifbrparam;
941 		struct ifbropreq ifbropreq;
942 	} args;
943 	struct ifdrv *ifd = (struct ifdrv *) data;
944 	const struct bridge_control *bc;
945 	int error = 0, oldmtu;
946 
947 	BRIDGE_LOCK(sc);
948 
949 	switch (cmd) {
950 	case SIOCADDMULTI:
951 	case SIOCDELMULTI:
952 		break;
953 
954 	case SIOCGDRVSPEC:
955 	case SIOCSDRVSPEC:
956 		if (ifd->ifd_cmd >= bridge_control_table_size) {
957 			error = EINVAL;
958 			break;
959 		}
960 		bc = &bridge_control_table[ifd->ifd_cmd];
961 
962 		if (cmd == SIOCGDRVSPEC &&
963 		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
964 			error = EINVAL;
965 			break;
966 		}
967 		else if (cmd == SIOCSDRVSPEC &&
968 		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
969 			error = EINVAL;
970 			break;
971 		}
972 
973 		if (bc->bc_flags & BC_F_SUSER) {
974 			error = priv_check(td, PRIV_NET_BRIDGE);
975 			if (error)
976 				break;
977 		}
978 
979 		if (ifd->ifd_len != bc->bc_argsize ||
980 		    ifd->ifd_len > sizeof(args)) {
981 			error = EINVAL;
982 			break;
983 		}
984 
985 		bzero(&args, sizeof(args));
986 		if (bc->bc_flags & BC_F_COPYIN) {
987 			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
988 			if (error)
989 				break;
990 		}
991 
992 		oldmtu = ifp->if_mtu;
993 		error = (*bc->bc_func)(sc, &args);
994 		if (error)
995 			break;
996 
997 		/*
998 		 * Bridge MTU may change during addition of the first port.
999 		 * If it did, perform the network-layer-specific processing.
1000 		 */
1001 		if (ifp->if_mtu != oldmtu)
1002 			if_notifymtu(ifp);
1003 
1004 		if (bc->bc_flags & BC_F_COPYOUT)
1005 			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
1006 
1007 		break;
1008 
1009 	case SIOCSIFFLAGS:
1010 		if (!(ifp->if_flags & IFF_UP) &&
1011 		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1012 			/*
1013 			 * If interface is marked down and it is running,
1014 			 * then stop and disable it.
1015 			 */
1016 			bridge_stop(ifp, 1);
1017 		} else if ((ifp->if_flags & IFF_UP) &&
1018 		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1019 			/*
1020 			 * If interface is marked up and it is stopped, then
1021 			 * start it.
1022 			 */
1023 			BRIDGE_UNLOCK(sc);
1024 			(*ifp->if_init)(sc);
1025 			BRIDGE_LOCK(sc);
1026 		}
1027 		break;
1028 
1029 	case SIOCSIFMTU:
1030 		oldmtu = sc->sc_ifp->if_mtu;
1031 
1032 		if (ifr->ifr_mtu < IF_MINMTU) {
1033 			error = EINVAL;
1034 			break;
1035 		}
1036 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1037 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1038 			break;
1039 		}
1040 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1041 			error = (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1042 			    SIOCSIFMTU, (caddr_t)ifr);
1043 			if (error != 0) {
1044 				log(LOG_NOTICE, "%s: invalid MTU: %u for"
1045 				    " member %s\n", sc->sc_ifp->if_xname,
1046 				    ifr->ifr_mtu,
1047 				    bif->bif_ifp->if_xname);
1048 				error = EINVAL;
1049 				break;
1050 			}
1051 		}
1052 		if (error) {
1053 			/* Restore the previous MTU on all member interfaces. */
1054 			ifr->ifr_mtu = oldmtu;
1055 			CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1056 				(*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1057 				    SIOCSIFMTU, (caddr_t)ifr);
1058 			}
1059 		} else {
1060 			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1061 		}
1062 		break;
1063 	default:
1064 		/*
1065 		 * drop the lock as ether_ioctl() will call bridge_start() and
1066 		 * cause the lock to be recursed.
1067 		 */
1068 		BRIDGE_UNLOCK(sc);
1069 		error = ether_ioctl(ifp, cmd, data);
1070 		BRIDGE_LOCK(sc);
1071 		break;
1072 	}
1073 
1074 	BRIDGE_UNLOCK(sc);
1075 
1076 	return (error);
1077 }
1078 
1079 /*
1080  * bridge_mutecaps:
1081  *
1082  *	Clear or restore unwanted capabilities on the member interface
1083  */
1084 static void
1085 bridge_mutecaps(struct bridge_softc *sc)
1086 {
1087 	struct bridge_iflist *bif;
1088 	int enabled, mask;
1089 
1090 	BRIDGE_LOCK_ASSERT(sc);
1091 
1092 	/* Initial bitmask of capabilities to test */
1093 	mask = BRIDGE_IFCAPS_MASK;
1094 
1095 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1096 		/* Every member must support it or it's disabled */
1097 		mask &= bif->bif_savedcaps;
1098 	}
1099 
1100 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1101 		enabled = bif->bif_ifp->if_capenable;
1102 		enabled &= ~BRIDGE_IFCAPS_STRIP;
1103 		/* strip off mask bits and enable them again if allowed */
1104 		enabled &= ~BRIDGE_IFCAPS_MASK;
1105 		enabled |= mask;
1106 		bridge_set_ifcap(sc, bif, enabled);
1107 	}
1108 }
1109 
1110 static void
1111 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1112 {
1113 	struct ifnet *ifp = bif->bif_ifp;
1114 	struct ifreq ifr;
1115 	int error, mask, stuck;
1116 
1117 	bzero(&ifr, sizeof(ifr));
1118 	ifr.ifr_reqcap = set;
1119 
1120 	if (ifp->if_capenable != set) {
1121 		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1122 		if (error)
1123 			if_printf(sc->sc_ifp,
1124 			    "error setting capabilities on %s: %d\n",
1125 			    ifp->if_xname, error);
1126 		mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
1127 		stuck = ifp->if_capenable & mask & ~set;
1128 		if (stuck != 0)
1129 			if_printf(sc->sc_ifp,
1130 			    "can't disable some capabilities on %s: 0x%x\n",
1131 			    ifp->if_xname, stuck);
1132 	}
1133 }
1134 
1135 /*
1136  * bridge_lookup_member:
1137  *
1138  *	Lookup a bridge member interface.
1139  */
1140 static struct bridge_iflist *
1141 bridge_lookup_member(struct bridge_softc *sc, const char *name)
1142 {
1143 	struct bridge_iflist *bif;
1144 	struct ifnet *ifp;
1145 
1146 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1147 
1148 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1149 		ifp = bif->bif_ifp;
1150 		if (strcmp(ifp->if_xname, name) == 0)
1151 			return (bif);
1152 	}
1153 
1154 	return (NULL);
1155 }
1156 
1157 /*
1158  * bridge_lookup_member_if:
1159  *
1160  *	Lookup a bridge member interface by ifnet*.
1161  */
1162 static struct bridge_iflist *
1163 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1164 {
1165 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1166 	return (member_ifp->if_bridge);
1167 }
1168 
1169 static void
1170 bridge_delete_member_cb(struct epoch_context *ctx)
1171 {
1172 	struct bridge_iflist *bif;
1173 
1174 	bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1175 
1176 	free(bif, M_DEVBUF);
1177 }
1178 
1179 /*
1180  * bridge_delete_member:
1181  *
1182  *	Delete the specified member interface.
1183  */
1184 static void
1185 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1186     int gone)
1187 {
1188 	struct ifnet *ifs = bif->bif_ifp;
1189 	struct ifnet *fif = NULL;
1190 	struct bridge_iflist *bifl;
1191 
1192 	BRIDGE_LOCK_ASSERT(sc);
1193 
1194 	if (bif->bif_flags & IFBIF_STP)
1195 		bstp_disable(&bif->bif_stp);
1196 
1197 	ifs->if_bridge = NULL;
1198 	CK_LIST_REMOVE(bif, bif_next);
1199 
1200 	/*
1201 	 * If removing the interface that gave the bridge its mac address, set
1202 	 * the mac address of the bridge to the address of the next member, or
1203 	 * to its default address if no members are left.
1204 	 */
1205 	if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1206 		if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1207 			bcopy(&sc->sc_defaddr,
1208 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1209 			sc->sc_ifaddr = NULL;
1210 		} else {
1211 			bifl = CK_LIST_FIRST(&sc->sc_iflist);
1212 			fif = bifl->bif_ifp;
1213 			bcopy(IF_LLADDR(fif),
1214 			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1215 			sc->sc_ifaddr = fif;
1216 		}
1217 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1218 	}
1219 
1220 	bridge_linkcheck(sc);
1221 	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
1222 	BRIDGE_RT_LOCK(sc);
1223 	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1224 	BRIDGE_RT_UNLOCK(sc);
1225 	KASSERT(bif->bif_addrcnt == 0,
1226 	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1227 
1228 	ifs->if_bridge_output = NULL;
1229 	ifs->if_bridge_input = NULL;
1230 	ifs->if_bridge_linkstate = NULL;
1231 	if (!gone) {
1232 		switch (ifs->if_type) {
1233 		case IFT_ETHER:
1234 		case IFT_L2VLAN:
1235 			/*
1236 			 * Take the interface out of promiscuous mode, but only
1237 			 * if it was promiscuous in the first place. It might
1238 			 * not be if we're in the bridge_ioctl_add() error path.
1239 			 */
1240 			if (ifs->if_flags & IFF_PROMISC)
1241 				(void) ifpromisc(ifs, 0);
1242 			break;
1243 
1244 		case IFT_GIF:
1245 			break;
1246 
1247 		default:
1248 #ifdef DIAGNOSTIC
1249 			panic("bridge_delete_member: impossible");
1250 #endif
1251 			break;
1252 		}
1253 		/* re-enable any interface capabilities */
1254 		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1255 	}
1256 	bstp_destroy(&bif->bif_stp);	/* prepare to free */
1257 
1258 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1259 }
1260 
1261 /*
1262  * bridge_delete_span:
1263  *
1264  *	Delete the specified span interface.
1265  */
1266 static void
1267 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1268 {
1269 	BRIDGE_LOCK_ASSERT(sc);
1270 
1271 	KASSERT(bif->bif_ifp->if_bridge == NULL,
1272 	    ("%s: not a span interface", __func__));
1273 
1274 	CK_LIST_REMOVE(bif, bif_next);
1275 
1276 	NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1277 }
1278 
1279 static int
1280 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1281 {
1282 	struct ifbreq *req = arg;
1283 	struct bridge_iflist *bif = NULL;
1284 	struct ifnet *ifs;
1285 	int error = 0;
1286 
1287 	ifs = ifunit(req->ifbr_ifsname);
1288 	if (ifs == NULL)
1289 		return (ENOENT);
1290 	if (ifs->if_ioctl == NULL)	/* must be supported */
1291 		return (EINVAL);
1292 
1293 	/* If it's in the span list, it can't be a member. */
1294 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1295 		if (ifs == bif->bif_ifp)
1296 			return (EBUSY);
1297 
1298 	if (ifs->if_bridge) {
1299 		struct bridge_iflist *sbif = ifs->if_bridge;
1300 		if (sbif->bif_sc == sc)
1301 			return (EEXIST);
1302 
1303 		return (EBUSY);
1304 	}
1305 
1306 	switch (ifs->if_type) {
1307 	case IFT_ETHER:
1308 	case IFT_L2VLAN:
1309 	case IFT_GIF:
1310 		/* permitted interface types */
1311 		break;
1312 	default:
1313 		return (EINVAL);
1314 	}
1315 
1316 #ifdef INET6
1317 	/*
1318 	 * Two valid inet6 addresses with link-local scope must not be
1319 	 * on the parent interface and the member interfaces at the
1320 	 * same time.  This restriction is needed to prevent violation
1321 	 * of link-local scope zone.  Attempts to add a member
1322 	 * interface which has inet6 addresses when the parent has
1323 	 * inet6 triggers removal of all inet6 addresses on the member
1324 	 * interface.
1325 	 */
1326 
1327 	/* Check if the parent interface has a link-local scope addr. */
1328 	if (V_allow_llz_overlap == 0 &&
1329 	    in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1330 		/*
1331 		 * If any, remove all inet6 addresses from the member
1332 		 * interfaces.
1333 		 */
1334 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1335  			if (in6ifa_llaonifp(bif->bif_ifp)) {
1336 				in6_ifdetach(bif->bif_ifp);
1337 				if_printf(sc->sc_ifp,
1338 				    "IPv6 addresses on %s have been removed "
1339 				    "before adding it as a member to prevent "
1340 				    "IPv6 address scope violation.\n",
1341 				    bif->bif_ifp->if_xname);
1342 			}
1343 		}
1344 		if (in6ifa_llaonifp(ifs)) {
1345 			in6_ifdetach(ifs);
1346 			if_printf(sc->sc_ifp,
1347 			    "IPv6 addresses on %s have been removed "
1348 			    "before adding it as a member to prevent "
1349 			    "IPv6 address scope violation.\n",
1350 			    ifs->if_xname);
1351 		}
1352 	}
1353 #endif
1354 	/* Allow the first Ethernet member to define the MTU */
1355 	if (CK_LIST_EMPTY(&sc->sc_iflist))
1356 		sc->sc_ifp->if_mtu = ifs->if_mtu;
1357 	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1358 		struct ifreq ifr;
1359 
1360 		snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
1361 		    ifs->if_xname);
1362 		ifr.ifr_mtu = sc->sc_ifp->if_mtu;
1363 
1364 		error = (*ifs->if_ioctl)(ifs,
1365 		    SIOCSIFMTU, (caddr_t)&ifr);
1366 		if (error != 0) {
1367 			log(LOG_NOTICE, "%s: invalid MTU: %u for"
1368 			    " new member %s\n", sc->sc_ifp->if_xname,
1369 			    ifr.ifr_mtu,
1370 			    ifs->if_xname);
1371 			return (EINVAL);
1372 		}
1373 	}
1374 
1375 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1376 	if (bif == NULL)
1377 		return (ENOMEM);
1378 
1379 	bif->bif_sc = sc;
1380 	bif->bif_ifp = ifs;
1381 	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1382 	bif->bif_savedcaps = ifs->if_capenable;
1383 
1384 	/*
1385 	 * Assign the interface's MAC address to the bridge if it's the first
1386 	 * member and the MAC address of the bridge has not been changed from
1387 	 * the default randomly generated one.
1388 	 */
1389 	if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1390 	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1391 		bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1392 		sc->sc_ifaddr = ifs;
1393 		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1394 	}
1395 
1396 	ifs->if_bridge = bif;
1397 	ifs->if_bridge_output = bridge_output;
1398 	ifs->if_bridge_input = bridge_input;
1399 	ifs->if_bridge_linkstate = bridge_linkstate;
1400 	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1401 	/*
1402 	 * XXX: XLOCK HERE!?!
1403 	 *
1404 	 * NOTE: insert_***HEAD*** should be safe for the traversals.
1405 	 */
1406 	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1407 
1408 	/* Set interface capabilities to the intersection set of all members */
1409 	bridge_mutecaps(sc);
1410 	bridge_linkcheck(sc);
1411 
1412 	/* Place the interface into promiscuous mode */
1413 	switch (ifs->if_type) {
1414 		case IFT_ETHER:
1415 		case IFT_L2VLAN:
1416 			error = ifpromisc(ifs, 1);
1417 			break;
1418 	}
1419 
1420 	if (error)
1421 		bridge_delete_member(sc, bif, 0);
1422 	return (error);
1423 }
1424 
1425 static int
1426 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1427 {
1428 	struct ifbreq *req = arg;
1429 	struct bridge_iflist *bif;
1430 
1431 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1432 	if (bif == NULL)
1433 		return (ENOENT);
1434 
1435 	bridge_delete_member(sc, bif, 0);
1436 
1437 	return (0);
1438 }
1439 
1440 static int
1441 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1442 {
1443 	struct ifbreq *req = arg;
1444 	struct bridge_iflist *bif;
1445 	struct bstp_port *bp;
1446 
1447 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1448 	if (bif == NULL)
1449 		return (ENOENT);
1450 
1451 	bp = &bif->bif_stp;
1452 	req->ifbr_ifsflags = bif->bif_flags;
1453 	req->ifbr_state = bp->bp_state;
1454 	req->ifbr_priority = bp->bp_priority;
1455 	req->ifbr_path_cost = bp->bp_path_cost;
1456 	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1457 	req->ifbr_proto = bp->bp_protover;
1458 	req->ifbr_role = bp->bp_role;
1459 	req->ifbr_stpflags = bp->bp_flags;
1460 	req->ifbr_addrcnt = bif->bif_addrcnt;
1461 	req->ifbr_addrmax = bif->bif_addrmax;
1462 	req->ifbr_addrexceeded = bif->bif_addrexceeded;
1463 
1464 	/* Copy STP state options as flags */
1465 	if (bp->bp_operedge)
1466 		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1467 	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1468 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1469 	if (bp->bp_ptp_link)
1470 		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1471 	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1472 		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1473 	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1474 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1475 	if (bp->bp_flags & BSTP_PORT_ADMCOST)
1476 		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1477 	return (0);
1478 }
1479 
1480 static int
1481 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1482 {
1483 	struct epoch_tracker et;
1484 	struct ifbreq *req = arg;
1485 	struct bridge_iflist *bif;
1486 	struct bstp_port *bp;
1487 	int error;
1488 
1489 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1490 	if (bif == NULL)
1491 		return (ENOENT);
1492 	bp = &bif->bif_stp;
1493 
1494 	if (req->ifbr_ifsflags & IFBIF_SPAN)
1495 		/* SPAN is readonly */
1496 		return (EINVAL);
1497 
1498 	NET_EPOCH_ENTER(et);
1499 
1500 	if (req->ifbr_ifsflags & IFBIF_STP) {
1501 		if ((bif->bif_flags & IFBIF_STP) == 0) {
1502 			error = bstp_enable(&bif->bif_stp);
1503 			if (error) {
1504 				NET_EPOCH_EXIT(et);
1505 				return (error);
1506 			}
1507 		}
1508 	} else {
1509 		if ((bif->bif_flags & IFBIF_STP) != 0)
1510 			bstp_disable(&bif->bif_stp);
1511 	}
1512 
1513 	/* Pass on STP flags */
1514 	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1515 	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1516 	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1517 	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1518 
1519 	/* Save the bits relating to the bridge */
1520 	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1521 
1522 	NET_EPOCH_EXIT(et);
1523 
1524 	return (0);
1525 }
1526 
1527 static int
1528 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1529 {
1530 	struct ifbrparam *param = arg;
1531 
1532 	sc->sc_brtmax = param->ifbrp_csize;
1533 	bridge_rttrim(sc);
1534 
1535 	return (0);
1536 }
1537 
1538 static int
1539 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1540 {
1541 	struct ifbrparam *param = arg;
1542 
1543 	param->ifbrp_csize = sc->sc_brtmax;
1544 
1545 	return (0);
1546 }
1547 
1548 static int
1549 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1550 {
1551 	struct ifbifconf *bifc = arg;
1552 	struct bridge_iflist *bif;
1553 	struct ifbreq breq;
1554 	char *buf, *outbuf;
1555 	int count, buflen, len, error = 0;
1556 
1557 	count = 0;
1558 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1559 		count++;
1560 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1561 		count++;
1562 
1563 	buflen = sizeof(breq) * count;
1564 	if (bifc->ifbic_len == 0) {
1565 		bifc->ifbic_len = buflen;
1566 		return (0);
1567 	}
1568 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1569 	if (outbuf == NULL)
1570 		return (ENOMEM);
1571 
1572 	count = 0;
1573 	buf = outbuf;
1574 	len = min(bifc->ifbic_len, buflen);
1575 	bzero(&breq, sizeof(breq));
1576 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1577 		if (len < sizeof(breq))
1578 			break;
1579 
1580 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1581 		    sizeof(breq.ifbr_ifsname));
1582 		/* Fill in the ifbreq structure */
1583 		error = bridge_ioctl_gifflags(sc, &breq);
1584 		if (error)
1585 			break;
1586 		memcpy(buf, &breq, sizeof(breq));
1587 		count++;
1588 		buf += sizeof(breq);
1589 		len -= sizeof(breq);
1590 	}
1591 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1592 		if (len < sizeof(breq))
1593 			break;
1594 
1595 		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1596 		    sizeof(breq.ifbr_ifsname));
1597 		breq.ifbr_ifsflags = bif->bif_flags;
1598 		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1599 		memcpy(buf, &breq, sizeof(breq));
1600 		count++;
1601 		buf += sizeof(breq);
1602 		len -= sizeof(breq);
1603 	}
1604 
1605 	bifc->ifbic_len = sizeof(breq) * count;
1606 	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1607 	free(outbuf, M_TEMP);
1608 	return (error);
1609 }
1610 
1611 static int
1612 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1613 {
1614 	struct ifbaconf *bac = arg;
1615 	struct bridge_rtnode *brt;
1616 	struct ifbareq bareq;
1617 	char *buf, *outbuf;
1618 	int count, buflen, len, error = 0;
1619 
1620 	if (bac->ifbac_len == 0)
1621 		return (0);
1622 
1623 	count = 0;
1624 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1625 		count++;
1626 	buflen = sizeof(bareq) * count;
1627 
1628 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1629 	if (outbuf == NULL)
1630 		return (ENOMEM);
1631 
1632 	count = 0;
1633 	buf = outbuf;
1634 	len = min(bac->ifbac_len, buflen);
1635 	bzero(&bareq, sizeof(bareq));
1636 	CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1637 		if (len < sizeof(bareq))
1638 			goto out;
1639 		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1640 		    sizeof(bareq.ifba_ifsname));
1641 		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1642 		bareq.ifba_vlan = brt->brt_vlan;
1643 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1644 				time_uptime < brt->brt_expire)
1645 			bareq.ifba_expire = brt->brt_expire - time_uptime;
1646 		else
1647 			bareq.ifba_expire = 0;
1648 		bareq.ifba_flags = brt->brt_flags;
1649 
1650 		memcpy(buf, &bareq, sizeof(bareq));
1651 		count++;
1652 		buf += sizeof(bareq);
1653 		len -= sizeof(bareq);
1654 	}
1655 out:
1656 	bac->ifbac_len = sizeof(bareq) * count;
1657 	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1658 	free(outbuf, M_TEMP);
1659 	return (error);
1660 }
1661 
1662 static int
1663 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1664 {
1665 	struct ifbareq *req = arg;
1666 	struct bridge_iflist *bif;
1667 	struct epoch_tracker et;
1668 	int error;
1669 
1670 	NET_EPOCH_ENTER(et);
1671 	bif = bridge_lookup_member(sc, req->ifba_ifsname);
1672 	if (bif == NULL) {
1673 		NET_EPOCH_EXIT(et);
1674 		return (ENOENT);
1675 	}
1676 
1677 	/* bridge_rtupdate() may acquire the lock. */
1678 	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1679 	    req->ifba_flags);
1680 	NET_EPOCH_EXIT(et);
1681 
1682 	return (error);
1683 }
1684 
1685 static int
1686 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1687 {
1688 	struct ifbrparam *param = arg;
1689 
1690 	sc->sc_brttimeout = param->ifbrp_ctime;
1691 	return (0);
1692 }
1693 
1694 static int
1695 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1696 {
1697 	struct ifbrparam *param = arg;
1698 
1699 	param->ifbrp_ctime = sc->sc_brttimeout;
1700 	return (0);
1701 }
1702 
1703 static int
1704 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1705 {
1706 	struct ifbareq *req = arg;
1707 	int vlan = req->ifba_vlan;
1708 
1709 	/* Userspace uses '0' to mean 'any vlan' */
1710 	if (vlan == 0)
1711 		vlan = DOT1Q_VID_RSVD_IMPL;
1712 
1713 	return (bridge_rtdaddr(sc, req->ifba_dst, vlan));
1714 }
1715 
1716 static int
1717 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1718 {
1719 	struct ifbreq *req = arg;
1720 
1721 	BRIDGE_RT_LOCK(sc);
1722 	bridge_rtflush(sc, req->ifbr_ifsflags);
1723 	BRIDGE_RT_UNLOCK(sc);
1724 
1725 	return (0);
1726 }
1727 
1728 static int
1729 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1730 {
1731 	struct ifbrparam *param = arg;
1732 	struct bstp_state *bs = &sc->sc_stp;
1733 
1734 	param->ifbrp_prio = bs->bs_bridge_priority;
1735 	return (0);
1736 }
1737 
1738 static int
1739 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1740 {
1741 	struct ifbrparam *param = arg;
1742 
1743 	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1744 }
1745 
1746 static int
1747 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1748 {
1749 	struct ifbrparam *param = arg;
1750 	struct bstp_state *bs = &sc->sc_stp;
1751 
1752 	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1753 	return (0);
1754 }
1755 
1756 static int
1757 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1758 {
1759 	struct ifbrparam *param = arg;
1760 
1761 	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1762 }
1763 
1764 static int
1765 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1766 {
1767 	struct ifbrparam *param = arg;
1768 	struct bstp_state *bs = &sc->sc_stp;
1769 
1770 	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1771 	return (0);
1772 }
1773 
1774 static int
1775 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1776 {
1777 	struct ifbrparam *param = arg;
1778 
1779 	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1780 }
1781 
1782 static int
1783 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1784 {
1785 	struct ifbrparam *param = arg;
1786 	struct bstp_state *bs = &sc->sc_stp;
1787 
1788 	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1789 	return (0);
1790 }
1791 
1792 static int
1793 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1794 {
1795 	struct ifbrparam *param = arg;
1796 
1797 	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1798 }
1799 
1800 static int
1801 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1802 {
1803 	struct ifbreq *req = arg;
1804 	struct bridge_iflist *bif;
1805 
1806 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1807 	if (bif == NULL)
1808 		return (ENOENT);
1809 
1810 	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1811 }
1812 
1813 static int
1814 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1815 {
1816 	struct ifbreq *req = arg;
1817 	struct bridge_iflist *bif;
1818 
1819 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1820 	if (bif == NULL)
1821 		return (ENOENT);
1822 
1823 	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1824 }
1825 
1826 static int
1827 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1828 {
1829 	struct ifbreq *req = arg;
1830 	struct bridge_iflist *bif;
1831 
1832 	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1833 	if (bif == NULL)
1834 		return (ENOENT);
1835 
1836 	bif->bif_addrmax = req->ifbr_addrmax;
1837 	return (0);
1838 }
1839 
1840 static int
1841 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
1842 {
1843 	struct ifbreq *req = arg;
1844 	struct bridge_iflist *bif = NULL;
1845 	struct ifnet *ifs;
1846 
1847 	ifs = ifunit(req->ifbr_ifsname);
1848 	if (ifs == NULL)
1849 		return (ENOENT);
1850 
1851 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1852 		if (ifs == bif->bif_ifp)
1853 			return (EBUSY);
1854 
1855 	if (ifs->if_bridge != NULL)
1856 		return (EBUSY);
1857 
1858 	switch (ifs->if_type) {
1859 		case IFT_ETHER:
1860 		case IFT_GIF:
1861 		case IFT_L2VLAN:
1862 			break;
1863 		default:
1864 			return (EINVAL);
1865 	}
1866 
1867 	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1868 	if (bif == NULL)
1869 		return (ENOMEM);
1870 
1871 	bif->bif_ifp = ifs;
1872 	bif->bif_flags = IFBIF_SPAN;
1873 
1874 	CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
1875 
1876 	return (0);
1877 }
1878 
1879 static int
1880 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
1881 {
1882 	struct ifbreq *req = arg;
1883 	struct bridge_iflist *bif;
1884 	struct ifnet *ifs;
1885 
1886 	ifs = ifunit(req->ifbr_ifsname);
1887 	if (ifs == NULL)
1888 		return (ENOENT);
1889 
1890 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1891 		if (ifs == bif->bif_ifp)
1892 			break;
1893 
1894 	if (bif == NULL)
1895 		return (ENOENT);
1896 
1897 	bridge_delete_span(sc, bif);
1898 
1899 	return (0);
1900 }
1901 
1902 static int
1903 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
1904 {
1905 	struct ifbropreq *req = arg;
1906 	struct bstp_state *bs = &sc->sc_stp;
1907 	struct bstp_port *root_port;
1908 
1909 	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
1910 	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
1911 	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
1912 
1913 	root_port = bs->bs_root_port;
1914 	if (root_port == NULL)
1915 		req->ifbop_root_port = 0;
1916 	else
1917 		req->ifbop_root_port = root_port->bp_ifp->if_index;
1918 
1919 	req->ifbop_holdcount = bs->bs_txholdcount;
1920 	req->ifbop_priority = bs->bs_bridge_priority;
1921 	req->ifbop_protocol = bs->bs_protover;
1922 	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
1923 	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
1924 	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
1925 	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
1926 	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
1927 	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
1928 
1929 	return (0);
1930 }
1931 
1932 static int
1933 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
1934 {
1935 	struct ifbrparam *param = arg;
1936 
1937 	param->ifbrp_cexceeded = sc->sc_brtexceeded;
1938 	return (0);
1939 }
1940 
1941 static int
1942 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
1943 {
1944 	struct ifbpstpconf *bifstp = arg;
1945 	struct bridge_iflist *bif;
1946 	struct bstp_port *bp;
1947 	struct ifbpstpreq bpreq;
1948 	char *buf, *outbuf;
1949 	int count, buflen, len, error = 0;
1950 
1951 	count = 0;
1952 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1953 		if ((bif->bif_flags & IFBIF_STP) != 0)
1954 			count++;
1955 	}
1956 
1957 	buflen = sizeof(bpreq) * count;
1958 	if (bifstp->ifbpstp_len == 0) {
1959 		bifstp->ifbpstp_len = buflen;
1960 		return (0);
1961 	}
1962 
1963 	outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1964 	if (outbuf == NULL)
1965 		return (ENOMEM);
1966 
1967 	count = 0;
1968 	buf = outbuf;
1969 	len = min(bifstp->ifbpstp_len, buflen);
1970 	bzero(&bpreq, sizeof(bpreq));
1971 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1972 		if (len < sizeof(bpreq))
1973 			break;
1974 
1975 		if ((bif->bif_flags & IFBIF_STP) == 0)
1976 			continue;
1977 
1978 		bp = &bif->bif_stp;
1979 		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
1980 		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
1981 		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
1982 		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
1983 		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
1984 		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
1985 
1986 		memcpy(buf, &bpreq, sizeof(bpreq));
1987 		count++;
1988 		buf += sizeof(bpreq);
1989 		len -= sizeof(bpreq);
1990 	}
1991 
1992 	bifstp->ifbpstp_len = sizeof(bpreq) * count;
1993 	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
1994 	free(outbuf, M_TEMP);
1995 	return (error);
1996 }
1997 
1998 static int
1999 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
2000 {
2001 	struct ifbrparam *param = arg;
2002 
2003 	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
2004 }
2005 
2006 static int
2007 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
2008 {
2009 	struct ifbrparam *param = arg;
2010 
2011 	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
2012 }
2013 
2014 /*
2015  * bridge_ifdetach:
2016  *
2017  *	Detach an interface from a bridge.  Called when a member
2018  *	interface is detaching.
2019  */
2020 static void
2021 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
2022 {
2023 	struct bridge_iflist *bif = ifp->if_bridge;
2024 	struct bridge_softc *sc = NULL;
2025 
2026 	if (bif)
2027 		sc = bif->bif_sc;
2028 
2029 	if (ifp->if_flags & IFF_RENAMING)
2030 		return;
2031 	if (V_bridge_cloner == NULL) {
2032 		/*
2033 		 * This detach handler can be called after
2034 		 * vnet_bridge_uninit().  Just return in that case.
2035 		 */
2036 		return;
2037 	}
2038 	/* Check if the interface is a bridge member */
2039 	if (sc != NULL) {
2040 		BRIDGE_LOCK(sc);
2041 		bridge_delete_member(sc, bif, 1);
2042 		BRIDGE_UNLOCK(sc);
2043 		return;
2044 	}
2045 
2046 	/* Check if the interface is a span port */
2047 	BRIDGE_LIST_LOCK();
2048 	LIST_FOREACH(sc, &V_bridge_list, sc_list) {
2049 		BRIDGE_LOCK(sc);
2050 		CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2051 			if (ifp == bif->bif_ifp) {
2052 				bridge_delete_span(sc, bif);
2053 				break;
2054 			}
2055 
2056 		BRIDGE_UNLOCK(sc);
2057 	}
2058 	BRIDGE_LIST_UNLOCK();
2059 }
2060 
2061 /*
2062  * bridge_init:
2063  *
2064  *	Initialize a bridge interface.
2065  */
2066 static void
2067 bridge_init(void *xsc)
2068 {
2069 	struct bridge_softc *sc = (struct bridge_softc *)xsc;
2070 	struct ifnet *ifp = sc->sc_ifp;
2071 
2072 	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2073 		return;
2074 
2075 	BRIDGE_LOCK(sc);
2076 	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
2077 	    bridge_timer, sc);
2078 
2079 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2080 	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
2081 
2082 	BRIDGE_UNLOCK(sc);
2083 }
2084 
2085 /*
2086  * bridge_stop:
2087  *
2088  *	Stop the bridge interface.
2089  */
2090 static void
2091 bridge_stop(struct ifnet *ifp, int disable)
2092 {
2093 	struct bridge_softc *sc = ifp->if_softc;
2094 
2095 	BRIDGE_LOCK_ASSERT(sc);
2096 
2097 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2098 		return;
2099 
2100 	BRIDGE_RT_LOCK(sc);
2101 	callout_stop(&sc->sc_brcallout);
2102 
2103 	bstp_stop(&sc->sc_stp);
2104 
2105 	bridge_rtflush(sc, IFBF_FLUSHDYN);
2106 	BRIDGE_RT_UNLOCK(sc);
2107 
2108 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2109 }
2110 
2111 /*
2112  * bridge_enqueue:
2113  *
2114  *	Enqueue a packet on a bridge member interface.
2115  *
2116  */
2117 static int
2118 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
2119 {
2120 	int len, err = 0;
2121 	short mflags;
2122 	struct mbuf *m0;
2123 
2124 	/* We may be sending a fragment so traverse the mbuf */
2125 	for (; m; m = m0) {
2126 		m0 = m->m_nextpkt;
2127 		m->m_nextpkt = NULL;
2128 		len = m->m_pkthdr.len;
2129 		mflags = m->m_flags;
2130 
2131 		/*
2132 		 * If underlying interface can not do VLAN tag insertion itself
2133 		 * then attach a packet tag that holds it.
2134 		 */
2135 		if ((m->m_flags & M_VLANTAG) &&
2136 		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
2137 			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2138 			if (m == NULL) {
2139 				if_printf(dst_ifp,
2140 				    "unable to prepend VLAN header\n");
2141 				if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
2142 				continue;
2143 			}
2144 			m->m_flags &= ~M_VLANTAG;
2145 		}
2146 
2147 		M_ASSERTPKTHDR(m); /* We shouldn't transmit mbuf without pkthdr */
2148 		if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
2149 			int n;
2150 
2151 			for (m = m0, n = 1; m != NULL; m = m0, n++) {
2152 				m0 = m->m_nextpkt;
2153 				m_freem(m);
2154 			}
2155 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, n);
2156 			break;
2157 		}
2158 
2159 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
2160 		if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
2161 		if (mflags & M_MCAST)
2162 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
2163 	}
2164 
2165 	return (err);
2166 }
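
/*
 * Note that bridge_enqueue() accepts a chain of packets linked through
 * m_nextpkt (for example the fragments produced by bridge_fragment());
 * each packet is transmitted in turn and, on the first if_transmit()
 * failure, the remainder of the chain is freed and counted as output
 * errors.  An illustrative sketch of a caller handing over such a
 * chain (m1 and m2 are hypothetical packet mbufs):
 *
 *	m1->m_nextpkt = m2;
 *	m2->m_nextpkt = NULL;
 *	error = bridge_enqueue(sc, dst_ifp, m1);
 */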
2167 
2168 /*
2169  * bridge_dummynet:
2170  *
2171  * 	Receive a queued packet from dummynet and pass it on to the output
2172  * 	interface.
2173  *
2174  *	The mbuf has the Ethernet header already attached.
2175  */
2176 static void
2177 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2178 {
2179 	struct bridge_iflist *bif = ifp->if_bridge;
2180 	struct bridge_softc *sc = NULL;
2181 
2182 	if (bif)
2183 		sc = bif->bif_sc;
2184 
2185 	/*
2186 	 * The packet didn't originate from a member interface. This should only
2187 	 * ever happen if a member interface is removed while packets are
2188 	 * queued for it.
2189 	 */
2190 	if (sc == NULL) {
2191 		m_freem(m);
2192 		return;
2193 	}
2194 
2195 	if (PFIL_HOOKED_OUT_46) {
2196 		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2197 			return;
2198 		if (m == NULL)
2199 			return;
2200 	}
2201 
2202 	bridge_enqueue(sc, ifp, m);
2203 }
2204 
2205 /*
2206  * bridge_output:
2207  *
2208  *	Send output from a bridge member interface.  This
2209  *	performs the bridging function for locally originated
2210  *	packets.
2211  *
2212  *	The mbuf has the Ethernet header already attached.  We must
2213  *	enqueue or free the mbuf before returning.
2214  */
2215 static int
2216 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2217     struct rtentry *rt)
2218 {
2219 	struct ether_header *eh;
2220 	struct bridge_iflist *sbif;
2221 	struct ifnet *bifp, *dst_if;
2222 	struct bridge_softc *sc;
2223 	ether_vlanid_t vlan;
2224 
2225 	NET_EPOCH_ASSERT();
2226 
2227 	if (m->m_len < ETHER_HDR_LEN) {
2228 		m = m_pullup(m, ETHER_HDR_LEN);
2229 		if (m == NULL)
2230 			return (0);
2231 	}
2232 
2233 	sbif = ifp->if_bridge;
2234 	sc = sbif->bif_sc;
2235 	bifp = sc->sc_ifp;
2236 
2237 	eh = mtod(m, struct ether_header *);
2238 	vlan = VLANTAGOF(m);
2239 
2240 	/*
2241 	 * If bridge is down, but the original output interface is up,
2242 	 * go ahead and send out that interface.  Otherwise, the packet
2243 	 * is dropped below.
2244 	 */
2245 	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2246 		dst_if = ifp;
2247 		goto sendunicast;
2248 	}
2249 
2250 	/*
2251 	 * If the packet is a multicast, or we don't know a better way to
2252 	 * get there, send to all interfaces.
2253 	 */
2254 	if (ETHER_IS_MULTICAST(eh->ether_dhost))
2255 		dst_if = NULL;
2256 	else
2257 		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2258 	/* Tap any traffic not passing back out the originating interface */
2259 	if (dst_if != ifp)
2260 		ETHER_BPF_MTAP(bifp, m);
2261 	if (dst_if == NULL) {
2262 		struct bridge_iflist *bif;
2263 		struct mbuf *mc;
2264 		int used = 0;
2265 
2266 		bridge_span(sc, m);
2267 
2268 		CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2269 			dst_if = bif->bif_ifp;
2270 
2271 			if (dst_if->if_type == IFT_GIF)
2272 				continue;
2273 			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2274 				continue;
2275 
2276 			/*
2277 			 * If this is not the original output interface,
2278 			 * and the interface is participating in spanning
2279 			 * tree, make sure the port is in a state that
2280 			 * allows forwarding.
2281 			 */
2282 			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2283 			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2284 				continue;
2285 
2286 			if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2287 				used = 1;
2288 				mc = m;
2289 			} else {
2290 				mc = m_dup(m, M_NOWAIT);
2291 				if (mc == NULL) {
2292 					if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2293 					continue;
2294 				}
2295 			}
2296 
2297 			bridge_enqueue(sc, dst_if, mc);
2298 		}
2299 		if (used == 0)
2300 			m_freem(m);
2301 		return (0);
2302 	}
2303 
2304 sendunicast:
2305 	/*
2306 	 * XXX Spanning tree consideration here?
2307 	 */
2308 
2309 	bridge_span(sc, m);
2310 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2311 		m_freem(m);
2312 		return (0);
2313 	}
2314 
2315 	bridge_enqueue(sc, dst_if, m);
2316 	return (0);
2317 }
2318 
2319 /*
2320  * bridge_transmit:
2321  *
2322  *	Do output on a bridge.
2323  *
2324  */
2325 static int
2326 bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2327 {
2328 	struct bridge_softc *sc;
2329 	struct ether_header *eh;
2330 	struct ifnet *dst_if;
2331 	int error = 0;
2332 
2333 	sc = ifp->if_softc;
2334 
2335 	ETHER_BPF_MTAP(ifp, m);
2336 
2337 	eh = mtod(m, struct ether_header *);
2338 
2339 	if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2340 	    (dst_if = bridge_rtlookup(sc, eh->ether_dhost, DOT1Q_VID_NULL)) !=
2341 	    NULL) {
2342 		error = bridge_enqueue(sc, dst_if, m);
2343 	} else
2344 		bridge_broadcast(sc, ifp, m, 0);
2345 
2346 	return (error);
2347 }
2348 
2349 #ifdef ALTQ
2350 static void
2351 bridge_altq_start(if_t ifp)
2352 {
2353 	struct ifaltq *ifq = &ifp->if_snd;
2354 	struct mbuf *m;
2355 
2356 	IFQ_LOCK(ifq);
2357 	IFQ_DEQUEUE_NOLOCK(ifq, m);
2358 	while (m != NULL) {
2359 		bridge_transmit(ifp, m);
2360 		IFQ_DEQUEUE_NOLOCK(ifq, m);
2361 	}
2362 	IFQ_UNLOCK(ifq);
2363 }
2364 
2365 static int
2366 bridge_altq_transmit(if_t ifp, struct mbuf *m)
2367 {
2368 	int err;
2369 
2370 	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
2371 		IFQ_ENQUEUE(&ifp->if_snd, m, err);
2372 		if (err == 0)
2373 			bridge_altq_start(ifp);
2374 	} else
2375 		err = bridge_transmit(ifp, m);
2376 
2377 	return (err);
2378 }
2379 #endif	/* ALTQ */
2380 
2381 /*
2382  * The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2383  */
2384 static void
2385 bridge_qflush(struct ifnet *ifp __unused)
2386 {
2387 }
2388 
2389 /*
2390  * bridge_forward:
2391  *
2392  *	The forwarding function of the bridge.
2393  *
2394  *	NOTE: Releases the lock on return.
2395  */
2396 static void
2397 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2398     struct mbuf *m)
2399 {
2400 	struct bridge_iflist *dbif;
2401 	struct ifnet *src_if, *dst_if, *ifp;
2402 	struct ether_header *eh;
2403 	uint16_t vlan;
2404 	uint8_t *dst;
2405 	int error;
2406 
2407 	NET_EPOCH_ASSERT();
2408 
2409 	src_if = m->m_pkthdr.rcvif;
2410 	ifp = sc->sc_ifp;
2411 
2412 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2413 	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2414 	vlan = VLANTAGOF(m);
2415 
2416 	if ((sbif->bif_flags & IFBIF_STP) &&
2417 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2418 		goto drop;
2419 
2420 	eh = mtod(m, struct ether_header *);
2421 	dst = eh->ether_dhost;
2422 
2423 	/* If the interface is learning, record the address. */
2424 	if (sbif->bif_flags & IFBIF_LEARNING) {
2425 		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2426 		    sbif, 0, IFBAF_DYNAMIC);
2427 		/*
2428 		 * If the interface has addresses limits then deny any source
2429 		 * If the interface has an address limit then deny any source
2430 		 */
2431 		if (error && sbif->bif_addrmax)
2432 			goto drop;
2433 	}
2434 
2435 	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2436 	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2437 		goto drop;
2438 
2439 #ifdef DEV_NETMAP
2440 	/*
2441 	 * Hand the packet to netmap only if it wasn't injected by netmap
2442 	 * itself.
2443 	 */
2444 	if ((m->m_flags & M_BRIDGE_INJECT) == 0 &&
2445 	    (if_getcapenable(ifp) & IFCAP_NETMAP) != 0) {
2446 		ifp->if_input(ifp, m);
2447 		return;
2448 	}
2449 	m->m_flags &= ~M_BRIDGE_INJECT;
2450 #endif
2451 
2452 	/*
2453 	 * At this point, the port either doesn't participate
2454 	 * in spanning tree or it is in the forwarding state.
2455 	 */
2456 
2457 	/*
2458 	 * If the packet is unicast, destined for someone on
2459 	 * "this" side of the bridge, drop it.
2460 	 */
2461 	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2462 		dst_if = bridge_rtlookup(sc, dst, vlan);
2463 		if (src_if == dst_if)
2464 			goto drop;
2465 	} else {
2466 		/*
2467 		 * Check if it's a reserved multicast address; any address
2468 		 * listed in 802.1D section 7.12.6 may not be forwarded by the
2469 		 * bridge.
2470 		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
2471 		 */
2472 		if (dst[0] == 0x01 && dst[1] == 0x80 &&
2473 		    dst[2] == 0xc2 && dst[3] == 0x00 &&
2474 		    dst[4] == 0x00 && dst[5] <= 0x0f)
2475 			goto drop;
2476 
2477 		/* ...forward it to all interfaces. */
2478 		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2479 		dst_if = NULL;
2480 	}
2481 
2482 	/*
2483 	 * If we have a destination interface which is a member of our bridge,
2484 	 * OR this is a unicast packet, push it through the bpf(4) machinery.
2485 	 * For broadcast or multicast packets, don't bother because it will
2486 	 * be reinjected into ether_input. We do this before we pass the packets
2487 	 * through the pfil(9) framework, as it is possible that pfil(9) will
2488 	 * drop the packet, or possibly modify it, making it difficult to debug
2489 	 * firewall issues on the bridge.
2490 	 */
2491 	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2492 		ETHER_BPF_MTAP(ifp, m);
2493 
2494 	/* run the packet filter */
2495 	if (PFIL_HOOKED_IN_46) {
2496 		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2497 			return;
2498 		if (m == NULL)
2499 			return;
2500 	}
2501 
2502 	if (dst_if == NULL) {
2503 		bridge_broadcast(sc, src_if, m, 1);
2504 		return;
2505 	}
2506 
2507 	/*
2508 	 * At this point, we're dealing with a unicast frame
2509 	 * going to a different interface.
2510 	 */
2511 	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2512 		goto drop;
2513 
2514 	dbif = bridge_lookup_member_if(sc, dst_if);
2515 	if (dbif == NULL)
2516 		/* Not a member of the bridge (anymore?) */
2517 		goto drop;
2518 
2519 	/* Private segments can not talk to each other */
2520 	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2521 		goto drop;
2522 
2523 	if ((dbif->bif_flags & IFBIF_STP) &&
2524 	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2525 		goto drop;
2526 
2527 	if (PFIL_HOOKED_OUT_46) {
2528 		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2529 			return;
2530 		if (m == NULL)
2531 			return;
2532 	}
2533 
2534 	bridge_enqueue(sc, dst_if, m);
2535 	return;
2536 
2537 drop:
2538 	m_freem(m);
2539 }
2540 
2541 /*
2542  * bridge_input:
2543  *
2544  *	Receive input from a member interface.  Queue the packet for
2545  *	bridging if it is not for us.
2546  */
2547 static struct mbuf *
2548 bridge_input(struct ifnet *ifp, struct mbuf *m)
2549 {
2550 	struct bridge_softc *sc = NULL;
2551 	struct bridge_iflist *bif, *bif2;
2552 	struct ifnet *bifp;
2553 	struct ether_header *eh;
2554 	struct mbuf *mc, *mc2;
2555 	ether_vlanid_t vlan;
2556 	int error;
2557 
2558 	NET_EPOCH_ASSERT();
2559 
2560 	eh = mtod(m, struct ether_header *);
2561 	vlan = VLANTAGOF(m);
2562 
2563 	bif = ifp->if_bridge;
2564 	if (bif)
2565 		sc = bif->bif_sc;
2566 
2567 	if (sc == NULL) {
2568 		/*
2569 		 * This packet originated from the bridge itself, so it must
2570 		 * have been transmitted by netmap.  Derive the "source"
2571 		 * interface from the source address and drop the packet if the
2572 		 * source address isn't known.
2573 		 */
2574 		KASSERT((m->m_flags & M_BRIDGE_INJECT) != 0,
2575 		    ("%s: ifnet %p missing a bridge softc", __func__, ifp));
2576 		sc = if_getsoftc(ifp);
2577 		ifp = bridge_rtlookup(sc, eh->ether_shost, vlan);
2578 		if (ifp == NULL) {
2579 			if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2580 			m_freem(m);
2581 			return (NULL);
2582 		}
2583 		m->m_pkthdr.rcvif = ifp;
2584 	}
2585 	bifp = sc->sc_ifp;
2586 	if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2587 		return (m);
2588 
2589 	/*
2590 	 * Implement support for bridge monitoring. If this flag has been
2591 	 * set on this interface, discard the packet once we push it through
2592 	 * the bpf(4) machinery, but before we do, increment the byte and
2593 	 * packet counters associated with this interface.
2594 	 */
2595 	if ((bifp->if_flags & IFF_MONITOR) != 0) {
2596 		m->m_pkthdr.rcvif  = bifp;
2597 		ETHER_BPF_MTAP(bifp, m);
2598 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2599 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2600 		m_freem(m);
2601 		return (NULL);
2602 	}
2603 
2604 	bridge_span(sc, m);
2605 
2606 	if (m->m_flags & (M_BCAST|M_MCAST)) {
2607 		/* Tap off 802.1D packets; they do not get forwarded. */
2608 		if (memcmp(eh->ether_dhost, bstp_etheraddr,
2609 		    ETHER_ADDR_LEN) == 0) {
2610 			bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2611 			return (NULL);
2612 		}
2613 
2614 		if ((bif->bif_flags & IFBIF_STP) &&
2615 		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2616 			return (m);
2617 		}
2618 
2619 		/*
2620 		 * Make a deep copy of the packet and enqueue the copy
2621 		 * for bridge processing; return the original packet for
2622 		 * local processing.
2623 		 */
2624 		mc = m_dup(m, M_NOWAIT);
2625 		if (mc == NULL) {
2626 			return (m);
2627 		}
2628 
2629 		/* Perform the bridge forwarding function with the copy. */
2630 		bridge_forward(sc, bif, mc);
2631 
2632 #ifdef DEV_NETMAP
2633 		/*
2634 		 * If netmap is enabled and has not already seen this packet,
2635 		 * then it will be consumed by bridge_forward().
2636 		 */
2637 		if ((if_getcapenable(bifp) & IFCAP_NETMAP) != 0 &&
2638 		    (m->m_flags & M_BRIDGE_INJECT) == 0) {
2639 			m_freem(m);
2640 			return (NULL);
2641 		}
2642 #endif
2643 
2644 		/*
2645 		 * Reinject the mbuf as arriving on the bridge so we have a
2646 		 * chance at claiming multicast packets. We can not loop back
2647 		 * here from ether_input as a bridge is never a member of a
2648 		 * bridge.
2649 		 */
2650 		KASSERT(bifp->if_bridge == NULL,
2651 		    ("loop created in bridge_input"));
2652 		mc2 = m_dup(m, M_NOWAIT);
2653 		if (mc2 != NULL) {
2654 			/* Keep the layer3 header aligned */
2655 			int i = min(mc2->m_pkthdr.len, max_protohdr);
2656 			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2657 		}
2658 		if (mc2 != NULL) {
2659 			mc2->m_pkthdr.rcvif = bifp;
2660 			mc2->m_flags &= ~M_BRIDGE_INJECT;
2661 			sc->sc_if_input(bifp, mc2);
2662 		}
2663 
2664 		/* Return the original packet for local processing. */
2665 		return (m);
2666 	}
2667 
2668 	if ((bif->bif_flags & IFBIF_STP) &&
2669 	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2670 		return (m);
2671 	}
2672 
2673 #if defined(INET) || defined(INET6)
2674 #define	CARP_CHECK_WE_ARE_DST(iface) \
2675 	((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_dhost))
2676 #define	CARP_CHECK_WE_ARE_SRC(iface) \
2677 	((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_shost))
2678 #else
2679 #define	CARP_CHECK_WE_ARE_DST(iface)	false
2680 #define	CARP_CHECK_WE_ARE_SRC(iface)	false
2681 #endif
2682 
2683 #ifdef DEV_NETMAP
2684 #define	GRAB_FOR_NETMAP(ifp, m) do {					\
2685 	if ((if_getcapenable(ifp) & IFCAP_NETMAP) != 0 &&		\
2686 	    ((m)->m_flags & M_BRIDGE_INJECT) == 0) {			\
2687 		(ifp)->if_input(ifp, m);				\
2688 		return (NULL);						\
2689 	}								\
2690 } while (0)
2691 #else
2692 #define	GRAB_FOR_NETMAP(ifp, m)
2693 #endif
2694 
2695 #define GRAB_OUR_PACKETS(iface)						\
2696 	if ((iface)->if_type == IFT_GIF)				\
2697 		continue;						\
2698 	/* It is destined for us. */					\
2699 	if (memcmp(IF_LLADDR(iface), eh->ether_dhost, ETHER_ADDR_LEN) == 0 || \
2700 	    CARP_CHECK_WE_ARE_DST(iface)) {				\
2701 		if (bif->bif_flags & IFBIF_LEARNING) {			\
2702 			error = bridge_rtupdate(sc, eh->ether_shost,	\
2703 			    vlan, bif, 0, IFBAF_DYNAMIC);		\
2704 			if (error && bif->bif_addrmax) {		\
2705 				m_freem(m);				\
2706 				return (NULL);				\
2707 			}						\
2708 		}							\
2709 		m->m_pkthdr.rcvif = iface;				\
2710 		if ((iface) == ifp) {					\
2711 			/* Skip bridge processing... src == dest */	\
2712 			return (m);					\
2713 		}							\
2714 		/* It's passing over or to the bridge, locally. */	\
2715 		ETHER_BPF_MTAP(bifp, m);				\
2716 		if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);		\
2717 		if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);\
2718 		/* Hand the packet over to netmap if necessary. */	\
2719 		GRAB_FOR_NETMAP(bifp, m);				\
2720 		/* Filter on the physical interface. */			\
2721 		if (V_pfil_local_phys && PFIL_HOOKED_IN_46) {		\
2722 			if (bridge_pfil(&m, NULL, ifp,			\
2723 			    PFIL_IN) != 0 || m == NULL) {		\
2724 				return (NULL);				\
2725 			}						\
2726 		}							\
2727 		if ((iface) != bifp)					\
2728 			ETHER_BPF_MTAP(iface, m);			\
2729 		return (m);						\
2730 	}								\
2731 									\
2732 	/* We just received a packet that we sent out. */		\
2733 	if (memcmp(IF_LLADDR(iface), eh->ether_shost, ETHER_ADDR_LEN) == 0 || \
2734 	    CARP_CHECK_WE_ARE_SRC(iface)) {				\
2735 		m_freem(m);						\
2736 		return (NULL);						\
2737 	}
2738 
2739 	/*
2740 	 * Unicast.  Make sure it's not for the bridge.
2741 	 */
2742 	do { GRAB_OUR_PACKETS(bifp) } while (0);
2743 
2744 	/*
2745 	 * Give ifp a chance at first priority.  This helps when the packet
2746 	 * arrives on an interface, such as a vlan(4), that shares a MAC
2747 	 * address with other members of the same bridge.  It also saves some
2748 	 * CPU cycles when the destination interface and the input interface
2749 	 * (i.e. ifp) are the same.
2750 	 */
2751 	do { GRAB_OUR_PACKETS(ifp) } while (0);
2752 
2753 	/* Now check all the bridge members. */
2754 	CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
2755 		GRAB_OUR_PACKETS(bif2->bif_ifp)
2756 	}
2757 
2758 #undef CARP_CHECK_WE_ARE_DST
2759 #undef CARP_CHECK_WE_ARE_SRC
2760 #undef GRAB_FOR_NETMAP
2761 #undef GRAB_OUR_PACKETS
2762 
2763 	/* Perform the bridge forwarding function. */
2764 	bridge_forward(sc, bif, m);
2765 
2766 	return (NULL);
2767 }
2768 
2769 /*
2770  * Inject a packet back into the host ethernet stack.  This will generally only
2771  * be used by netmap when an application writes to the host TX ring.  The
2772  * M_BRIDGE_INJECT flag ensures that the packet is re-routed to the bridge
2773  * interface after ethernet processing.
2774  */
2775 static void
2776 bridge_inject(struct ifnet *ifp, struct mbuf *m)
2777 {
2778 	struct bridge_softc *sc;
2779 
2780 	KASSERT((if_getcapenable(ifp) & IFCAP_NETMAP) != 0,
2781 	    ("%s: iface %s is not running in netmap mode",
2782 	    __func__, if_name(ifp)));
2783 	KASSERT((m->m_flags & M_BRIDGE_INJECT) == 0,
2784 	    ("%s: mbuf %p has M_BRIDGE_INJECT set", __func__, m));
2785 
2786 	m->m_flags |= M_BRIDGE_INJECT;
2787 	sc = if_getsoftc(ifp);
2788 	sc->sc_if_input(ifp, m);
2789 }
2790 
2791 /*
2792  * bridge_broadcast:
2793  *
2794  *	Send a frame to all interfaces that are members of
2795  *	the bridge, except for the one on which the packet
2796  *	arrived.
2797  *
2798  *	NOTE: Releases the lock on return.
2799  */
2800 static void
2801 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
2802     struct mbuf *m, int runfilt)
2803 {
2804 	struct bridge_iflist *dbif, *sbif;
2805 	struct mbuf *mc;
2806 	struct ifnet *dst_if;
2807 	int used = 0, i;
2808 
2809 	NET_EPOCH_ASSERT();
2810 
2811 	sbif = bridge_lookup_member_if(sc, src_if);
2812 
2813 	/* Filter on the bridge interface before broadcasting */
2814 	if (runfilt && PFIL_HOOKED_OUT_46) {
2815 		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
2816 			return;
2817 		if (m == NULL)
2818 			return;
2819 	}
2820 
2821 	CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
2822 		dst_if = dbif->bif_ifp;
2823 		if (dst_if == src_if)
2824 			continue;
2825 
2826 		/* Private segments can not talk to each other */
2827 		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
2828 			continue;
2829 
2830 		if ((dbif->bif_flags & IFBIF_STP) &&
2831 		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2832 			continue;
2833 
2834 		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
2835 		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
2836 			continue;
2837 
2838 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2839 			continue;
2840 
2841 		if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
2842 			mc = m;
2843 			used = 1;
2844 		} else {
2845 			mc = m_dup(m, M_NOWAIT);
2846 			if (mc == NULL) {
2847 				if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2848 				continue;
2849 			}
2850 		}
2851 
2852 		/*
2853 		 * Filter on the output interface. Pass a NULL bridge interface
2854 		 * pointer so we do not redundantly filter on the bridge for
2855 		 * each interface we broadcast on.
2856 		 */
2857 		if (runfilt && PFIL_HOOKED_OUT_46) {
2858 			if (used == 0) {
2859 				/* Keep the layer3 header aligned */
2860 				i = min(mc->m_pkthdr.len, max_protohdr);
2861 				mc = m_copyup(mc, i, ETHER_ALIGN);
2862 				if (mc == NULL) {
2863 					if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2864 					continue;
2865 				}
2866 			}
2867 			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
2868 				continue;
2869 			if (mc == NULL)
2870 				continue;
2871 		}
2872 
2873 		bridge_enqueue(sc, dst_if, mc);
2874 	}
2875 	if (used == 0)
2876 		m_freem(m);
2877 }
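
/*
 * The 'used' flag above implements the usual mbuf fan-out optimization:
 * every member except the last receives an m_dup() copy while the final
 * member consumes the original, so an N-way broadcast costs at most
 * N - 1 duplications, and the original is freed only if no member was
 * eligible to take it.  bridge_output() relies on the same pattern for
 * its unknown-destination flood.
 */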
2878 
2879 /*
2880  * bridge_span:
2881  *
2882  *	Duplicate a packet out one or more interfaces that are in span mode;
2883  *	the original mbuf is unmodified.
2884  */
2885 static void
2886 bridge_span(struct bridge_softc *sc, struct mbuf *m)
2887 {
2888 	struct bridge_iflist *bif;
2889 	struct ifnet *dst_if;
2890 	struct mbuf *mc;
2891 
2892 	NET_EPOCH_ASSERT();
2893 
2894 	if (CK_LIST_EMPTY(&sc->sc_spanlist))
2895 		return;
2896 
2897 	CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
2898 		dst_if = bif->bif_ifp;
2899 
2900 		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2901 			continue;
2902 
2903 		mc = m_dup(m, M_NOWAIT);
2904 		if (mc == NULL) {
2905 			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
2906 			continue;
2907 		}
2908 
2909 		bridge_enqueue(sc, dst_if, mc);
2910 	}
2911 }
2912 
2913 /*
2914  * bridge_rtupdate:
2915  *
2916  *	Add a bridge routing entry.
2917  */
2918 static int
2919 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
2920 		ether_vlanid_t vlan, struct bridge_iflist *bif,
2921 		int setflags, uint8_t flags)
2922 {
2923 	struct bridge_rtnode *brt;
2924 	struct bridge_iflist *obif;
2925 	int error;
2926 
2927 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
2928 
2929 	/* Check the source address is valid and not multicast. */
2930 	if (ETHER_IS_MULTICAST(dst) ||
2931 	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
2932 	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
2933 		return (EINVAL);
2934 
2935 	/*
2936 	 * A route for this destination might already exist.  If so,
2937 	 * update it, otherwise create a new one.
2938 	 */
2939 	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
2940 		BRIDGE_RT_LOCK(sc);
2941 
2942 		/* Check again, now that we have the lock. There could have
2943 		 * been a race and we only want to insert this once. */
2944 		if (bridge_rtnode_lookup(sc, dst, vlan) != NULL) {
2945 			BRIDGE_RT_UNLOCK(sc);
2946 			return (0);
2947 		}
2948 
2949 		if (sc->sc_brtcnt >= sc->sc_brtmax) {
2950 			sc->sc_brtexceeded++;
2951 			BRIDGE_RT_UNLOCK(sc);
2952 			return (ENOSPC);
2953 		}
2954 		/* Check per interface address limits (if enabled) */
2955 		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
2956 			bif->bif_addrexceeded++;
2957 			BRIDGE_RT_UNLOCK(sc);
2958 			return (ENOSPC);
2959 		}
2960 
2961 		/*
2962 		 * Allocate a new bridge forwarding node, and
2963 		 * initialize the expiration time and Ethernet
2964 		 * address.
2965 		 */
2966 		brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
2967 		if (brt == NULL) {
2968 			BRIDGE_RT_UNLOCK(sc);
2969 			return (ENOMEM);
2970 		}
2971 		brt->brt_vnet = curvnet;
2972 
2973 		if (bif->bif_flags & IFBIF_STICKY)
2974 			brt->brt_flags = IFBAF_STICKY;
2975 		else
2976 			brt->brt_flags = IFBAF_DYNAMIC;
2977 
2978 		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
2979 		brt->brt_vlan = vlan;
2980 
2981 		brt->brt_dst = bif;
2982 		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
2983 			uma_zfree(V_bridge_rtnode_zone, brt);
2984 			BRIDGE_RT_UNLOCK(sc);
2985 			return (error);
2986 		}
2987 		bif->bif_addrcnt++;
2988 
2989 		BRIDGE_RT_UNLOCK(sc);
2990 	}
2991 
2992 	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
2993 	    (obif = brt->brt_dst) != bif) {
2994 		MPASS(obif != NULL);
2995 
2996 		BRIDGE_RT_LOCK(sc);
2997 		brt->brt_dst->bif_addrcnt--;
2998 		brt->brt_dst = bif;
2999 		brt->brt_dst->bif_addrcnt++;
3000 		BRIDGE_RT_UNLOCK(sc);
3001 
3002 		if (V_log_mac_flap &&
3003 		    ppsratecheck(&V_log_last, &V_log_count, V_log_interval)) {
3004 			log(LOG_NOTICE,
3005 			    "%s: mac address %6D vlan %d moved from %s to %s\n",
3006 			    sc->sc_ifp->if_xname,
3007 			    &brt->brt_addr[0], ":",
3008 			    brt->brt_vlan,
3009 			    obif->bif_ifp->if_xname,
3010 			    bif->bif_ifp->if_xname);
3011 		}
3012 	}
3013 
3014 	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3015 		brt->brt_expire = time_uptime + sc->sc_brttimeout;
3016 	if (setflags)
3017 		brt->brt_flags = flags;
3018 
3019 	return (0);
3020 }
3021 
3022 /*
3023  * bridge_rtlookup:
3024  *
3025  *	Lookup the destination interface for an address.
3026  */
3027 static struct ifnet *
3028 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr,
3029 		ether_vlanid_t vlan)
3030 {
3031 	struct bridge_rtnode *brt;
3032 
3033 	NET_EPOCH_ASSERT();
3034 
3035 	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
3036 		return (NULL);
3037 
3038 	return (brt->brt_ifp);
3039 }
3040 
3041 /*
3042  * bridge_rttrim:
3043  *
3044  *	Trim the routine table so that we have a number
3045  *	Trim the routing table so that we have a number
3046  *	maximum number.
3047  */
3048 static void
3049 bridge_rttrim(struct bridge_softc *sc)
3050 {
3051 	struct bridge_rtnode *brt, *nbrt;
3052 
3053 	NET_EPOCH_ASSERT();
3054 	BRIDGE_RT_LOCK_ASSERT(sc);
3055 
3056 	/* Make sure we actually need to do this. */
3057 	if (sc->sc_brtcnt <= sc->sc_brtmax)
3058 		return;
3059 
3060 	/* Force an aging cycle; this might trim enough addresses. */
3061 	bridge_rtage(sc);
3062 	if (sc->sc_brtcnt <= sc->sc_brtmax)
3063 		return;
3064 
3065 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3066 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3067 			bridge_rtnode_destroy(sc, brt);
3068 			if (sc->sc_brtcnt <= sc->sc_brtmax)
3069 				return;
3070 		}
3071 	}
3072 }
3073 
3074 /*
3075  * bridge_timer:
3076  *
3077  *	Aging timer for the bridge.
3078  */
3079 static void
3080 bridge_timer(void *arg)
3081 {
3082 	struct bridge_softc *sc = arg;
3083 
3084 	BRIDGE_RT_LOCK_ASSERT(sc);
3085 
3086 	/* Destruction of rtnodes requires a proper vnet context */
3087 	CURVNET_SET(sc->sc_ifp->if_vnet);
3088 	bridge_rtage(sc);
3089 
3090 	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
3091 		callout_reset(&sc->sc_brcallout,
3092 		    bridge_rtable_prune_period * hz, bridge_timer, sc);
3093 	CURVNET_RESTORE();
3094 }
3095 
3096 /*
3097  * bridge_rtage:
3098  *
3099  *	Perform an aging cycle.
3100  */
3101 static void
3102 bridge_rtage(struct bridge_softc *sc)
3103 {
3104 	struct bridge_rtnode *brt, *nbrt;
3105 
3106 	BRIDGE_RT_LOCK_ASSERT(sc);
3107 
3108 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3109 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3110 			if (time_uptime >= brt->brt_expire)
3111 				bridge_rtnode_destroy(sc, brt);
3112 		}
3113 	}
3114 }
3115 
3116 /*
3117  * bridge_rtflush:
3118  *
3119  *	Remove all dynamic addresses from the bridge.
3120  */
3121 static void
3122 bridge_rtflush(struct bridge_softc *sc, int full)
3123 {
3124 	struct bridge_rtnode *brt, *nbrt;
3125 
3126 	BRIDGE_RT_LOCK_ASSERT(sc);
3127 
3128 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3129 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3130 			bridge_rtnode_destroy(sc, brt);
3131 	}
3132 }
3133 
3134 /*
3135  * bridge_rtdaddr:
3136  *
3137  *	Remove an address from the table.
3138  */
3139 static int
3140 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr,
3141 	       ether_vlanid_t vlan)
3142 {
3143 	struct bridge_rtnode *brt;
3144 	int found = 0;
3145 
3146 	BRIDGE_RT_LOCK(sc);
3147 
3148 	/*
3149 	 * If vlan is DOT1Q_VID_RSVD_IMPL then we want to delete for all vlans
3150 	 * so the lookup may return more than one.
3151 	 */
3152 	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
3153 		bridge_rtnode_destroy(sc, brt);
3154 		found = 1;
3155 	}
3156 
3157 	BRIDGE_RT_UNLOCK(sc);
3158 
3159 	return (found ? 0 : ENOENT);
3160 }
3161 
3162 /*
3163  * bridge_rtdelete:
3164  *
3165  *	Delete routes to a specific member interface.
3166  */
3167 static void
3168 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
3169 {
3170 	struct bridge_rtnode *brt, *nbrt;
3171 
3172 	BRIDGE_RT_LOCK_ASSERT(sc);
3173 
3174 	CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3175 		if (brt->brt_ifp == ifp && (full ||
3176 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
3177 			bridge_rtnode_destroy(sc, brt);
3178 	}
3179 }
3180 
3181 /*
3182  * bridge_rtable_init:
3183  *
3184  *	Initialize the route table for this bridge.
3185  */
3186 static void
3187 bridge_rtable_init(struct bridge_softc *sc)
3188 {
3189 	int i;
3190 
3191 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
3192 	    M_DEVBUF, M_WAITOK);
3193 
3194 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
3195 		CK_LIST_INIT(&sc->sc_rthash[i]);
3196 
3197 	sc->sc_rthash_key = arc4random();
3198 	CK_LIST_INIT(&sc->sc_rtlist);
3199 }
3200 
3201 /*
3202  * bridge_rtable_fini:
3203  *
3204  *	Deconstruct the route table for this bridge.
3205  */
3206 static void
3207 bridge_rtable_fini(struct bridge_softc *sc)
3208 {
3209 
3210 	KASSERT(sc->sc_brtcnt == 0,
3211 	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
3212 	free(sc->sc_rthash, M_DEVBUF);
3213 }
3214 
3215 /*
3216  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3217  * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3218  */
3219 #define	mix(a, b, c)							\
3220 do {									\
3221 	a -= b; a -= c; a ^= (c >> 13);					\
3222 	b -= c; b -= a; b ^= (a << 8);					\
3223 	c -= a; c -= b; c ^= (b >> 13);					\
3224 	a -= b; a -= c; a ^= (c >> 12);					\
3225 	b -= c; b -= a; b ^= (a << 16);					\
3226 	c -= a; c -= b; c ^= (b >> 5);					\
3227 	a -= b; a -= c; a ^= (c >> 3);					\
3228 	b -= c; b -= a; b ^= (a << 10);					\
3229 	c -= a; c -= b; c ^= (b >> 15);					\
3230 } while (/*CONSTCOND*/0)
3231 
3232 static __inline uint32_t
3233 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3234 {
3235 	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3236 
3237 	b += addr[5] << 8;
3238 	b += addr[4];
3239 	a += addr[3] << 24;
3240 	a += addr[2] << 16;
3241 	a += addr[1] << 8;
3242 	a += addr[0];
3243 
3244 	mix(a, b, c);
3245 
3246 	return (c & BRIDGE_RTHASH_MASK);
3247 }
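
/*
 * The bucket index returned above depends on sc_rthash_key, which
 * bridge_rtable_init() seeds from arc4random().  Keying the hash makes
 * it impractical for a remote sender to pick source MAC addresses that
 * all collide into one chain, so lookups stay close to constant time
 * even under hostile traffic.  A lookup is simply (illustration only):
 *
 *	hash = bridge_rthash(sc, addr);
 *	CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash)
 *		(compare brt->brt_addr and brt->brt_vlan against the key)
 */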
3248 
3249 #undef mix
3250 
3251 static int
3252 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3253 {
3254 	int i, d;
3255 
3256 	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3257 		d = ((int)a[i]) - ((int)b[i]);
3258 	}
3259 
3260 	return (d);
3261 }
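
/*
 * This comparator defines a memcmp()-style total ordering on MAC
 * addresses.  bridge_rtnode_insert() keeps every hash chain sorted by
 * it, which is what allows bridge_rtnode_lookup() below to stop
 * scanning a chain early (the "dir > 0" check) instead of walking it
 * to the end when the address is not present.
 */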
3262 
3263 /*
3264  * bridge_rtnode_lookup:
3265  *
3266  *	Look up a bridge route node for the specified destination. Compare the
3267  *	vlan id, or return the first match if the wildcard vlan id is given.
3268  */
3269 static struct bridge_rtnode *
3270 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
3271 		     ether_vlanid_t vlan)
3272 {
3273 	struct bridge_rtnode *brt;
3274 	uint32_t hash;
3275 	int dir;
3276 
3277 	BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);
3278 
3279 	hash = bridge_rthash(sc, addr);
3280 	CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
3281 		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3282 		if (dir == 0 && (brt->brt_vlan == vlan || vlan == DOT1Q_VID_RSVD_IMPL))
3283 			return (brt);
3284 		if (dir > 0)
3285 			return (NULL);
3286 	}
3287 
3288 	return (NULL);
3289 }
3290 
3291 /*
3292  * bridge_rtnode_insert:
3293  *
3294  *	Insert the specified bridge node into the route table.  We
3295  *	assume the entry is not already in the table.
3296  */
3297 static int
3298 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3299 {
3300 	struct bridge_rtnode *lbrt;
3301 	uint32_t hash;
3302 	int dir;
3303 
3304 	BRIDGE_RT_LOCK_ASSERT(sc);
3305 
3306 	hash = bridge_rthash(sc, brt->brt_addr);
3307 
3308 	lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
3309 	if (lbrt == NULL) {
3310 		CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
3311 		goto out;
3312 	}
3313 
3314 	do {
3315 		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3316 		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
3317 			return (EEXIST);
3318 		if (dir > 0) {
3319 			CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3320 			goto out;
3321 		}
3322 		if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
3323 			CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3324 			goto out;
3325 		}
3326 		lbrt = CK_LIST_NEXT(lbrt, brt_hash);
3327 	} while (lbrt != NULL);
3328 
3329 #ifdef DIAGNOSTIC
3330 	panic("bridge_rtnode_insert: impossible");
3331 #endif
3332 
3333 out:
3334 	CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3335 	sc->sc_brtcnt++;
3336 
3337 	return (0);
3338 }
3339 
3340 static void
3341 bridge_rtnode_destroy_cb(struct epoch_context *ctx)
3342 {
3343 	struct bridge_rtnode *brt;
3344 
3345 	brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
3346 
3347 	CURVNET_SET(brt->brt_vnet);
3348 	uma_zfree(V_bridge_rtnode_zone, brt);
3349 	CURVNET_RESTORE();
3350 }
3351 
3352 /*
3353  * bridge_rtnode_destroy:
3354  *
3355  *	Destroy a bridge rtnode.
3356  */
3357 static void
3358 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3359 {
3360 	BRIDGE_RT_LOCK_ASSERT(sc);
3361 
3362 	CK_LIST_REMOVE(brt, brt_hash);
3363 
3364 	CK_LIST_REMOVE(brt, brt_list);
3365 	sc->sc_brtcnt--;
3366 	brt->brt_dst->bif_addrcnt--;
3367 
3368 	NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
3369 }
3370 
3371 /*
3372  * bridge_rtable_expire:
3373  *
3374  *	Set the expiry time for all routes on an interface.
3375  */
3376 static void
3377 bridge_rtable_expire(struct ifnet *ifp, int age)
3378 {
3379 	struct bridge_iflist *bif = NULL;
3380 	struct bridge_softc *sc = NULL;
3381 	struct bridge_rtnode *brt;
3382 
3383 	CURVNET_SET(ifp->if_vnet);
3384 
3385 	bif = ifp->if_bridge;
3386 	if (bif)
3387 		sc = bif->bif_sc;
3388 	MPASS(sc != NULL);
3389 	BRIDGE_RT_LOCK(sc);
3390 
3391 	/*
3392 	 * If the age is zero then flush, otherwise set all the expiry times to
3393 	 * age for the interface
3394 	 */
3395 	if (age == 0)
3396 		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3397 	else {
3398 		CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3399 			/* Cap the expiry time to 'age' */
3400 			if (brt->brt_ifp == ifp &&
3401 			    brt->brt_expire > time_uptime + age &&
3402 			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3403 				brt->brt_expire = time_uptime + age;
3404 		}
3405 	}
3406 	BRIDGE_RT_UNLOCK(sc);
3407 	CURVNET_RESTORE();
3408 }
3409 
3410 /*
3411  * bridge_state_change:
3412  *
3413  *	Callback from the bridgestp code when a port changes states.
3414  */
3415 static void
3416 bridge_state_change(struct ifnet *ifp, int state)
3417 {
3418 	struct bridge_iflist *bif = ifp->if_bridge;
3419 	struct bridge_softc *sc = bif->bif_sc;
3420 	static const char *stpstates[] = {
3421 		"disabled",
3422 		"listening",
3423 		"learning",
3424 		"forwarding",
3425 		"blocking",
3426 		"discarding"
3427 	};
3428 
3429 	CURVNET_SET(ifp->if_vnet);
3430 	if (V_log_stp)
3431 		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
3432 		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
3433 	CURVNET_RESTORE();
3434 }
3435 
3436 /*
3437  * Send bridge packets through pfil if they are one of the types pfil can deal
3438  * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
3439  * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
3440  * that interface.
3441  */
3442 static int
3443 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3444 {
3445 	int snap, error, i;
3446 	struct ether_header *eh1, eh2;
3447 	struct llc llc1;
3448 	u_int16_t ether_type;
3449 	pfil_return_t rv;
3450 #ifdef INET
3451 	struct ip *ip = NULL;
3452 	int hlen = 0;
3453 #endif
3454 
3455 	snap = 0;
3456 	error = -1;	/* Default error if not error == 0 */
3457 
3458 #if 0
3459 	/* we may return with the IP fields swapped, ensure its not shared */
3460 	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
3461 #endif
3462 
3463 	if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
3464 		return (0); /* filtering is disabled */
3465 
3466 	i = min((*mp)->m_pkthdr.len, max_protohdr);
3467 	if ((*mp)->m_len < i) {
3468 	    *mp = m_pullup(*mp, i);
3469 	    if (*mp == NULL) {
3470 		printf("%s: m_pullup failed\n", __func__);
3471 		return (-1);
3472 	    }
3473 	}
3474 
3475 	eh1 = mtod(*mp, struct ether_header *);
3476 	ether_type = ntohs(eh1->ether_type);
3477 
3478 	/*
3479 	 * Check for SNAP/LLC.
3480 	 */
3481 	if (ether_type < ETHERMTU) {
3482 		struct llc *llc2 = (struct llc *)(eh1 + 1);
3483 
3484 		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3485 		    llc2->llc_dsap == LLC_SNAP_LSAP &&
3486 		    llc2->llc_ssap == LLC_SNAP_LSAP &&
3487 		    llc2->llc_control == LLC_UI) {
3488 			ether_type = htons(llc2->llc_un.type_snap.ether_type);
3489 			snap = 1;
3490 		}
3491 	}
3492 
3493 	/*
3494 	 * If we're trying to filter bridge traffic, only look at traffic for
3495 	 * protocols available in the kernel (IPv4 and/or IPv6) to avoid
3496 	 * passing traffic for an unsupported protocol to the filter.  This is
3497 	 * lame since if we really wanted, say, an AppleTalk filter, we are
3498 	 * hosed, but of course we don't have an AppleTalk filter to begin
3499 	 * with.  (Note that since pfil doesn't understand ARP it will pass
3500 	 * *ALL* ARP traffic.)
3501 	 */
3502 	switch (ether_type) {
3503 #ifdef INET
3504 		case ETHERTYPE_ARP:
3505 		case ETHERTYPE_REVARP:
3506 			if (V_pfil_ipfw_arp == 0)
3507 				return (0); /* Automatically pass */
3508 
3509 			/* FALLTHROUGH */
3510 		case ETHERTYPE_IP:
3511 #endif
3512 #ifdef INET6
3513 		case ETHERTYPE_IPV6:
3514 #endif /* INET6 */
3515 			break;
3516 
3517 		default:
3518 			/*
3519 			 * We get here if the packet isn't from a supported
3520 			 * protocol.  Check to see if the user wants to pass
3521 			 * non-IP packets, these will not be checked by pfil(9)
3522 			 * and passed unconditionally so the default is to
3523 			 * drop.
3524 			 */
3525 			if (V_pfil_onlyip)
3526 				goto bad;
3527 	}
3528 
3529 	/* Run the packet through pfil before stripping link headers */
3530 	if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
3531 	    dir == PFIL_OUT && ifp != NULL) {
3532 		switch (pfil_mbuf_out(V_link_pfil_head, mp, ifp, NULL)) {
3533 		case PFIL_DROPPED:
3534 			return (EACCES);
3535 		case PFIL_CONSUMED:
3536 			return (0);
3537 		}
3538 	}
3539 
3540 	/* Strip off the Ethernet header and keep a copy. */
3541 	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3542 	m_adj(*mp, ETHER_HDR_LEN);
3543 
3544 	/* Strip off snap header, if present */
3545 	if (snap) {
3546 		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3547 		m_adj(*mp, sizeof(struct llc));
3548 	}
3549 
3550 	/*
3551 	 * Check the IP header for alignment and errors
3552 	 */
3553 	if (dir == PFIL_IN) {
3554 		switch (ether_type) {
3555 #ifdef INET
3556 			case ETHERTYPE_IP:
3557 				error = bridge_ip_checkbasic(mp);
3558 				break;
3559 #endif
3560 #ifdef INET6
3561 			case ETHERTYPE_IPV6:
3562 				error = bridge_ip6_checkbasic(mp);
3563 				break;
3564 #endif /* INET6 */
3565 			default:
3566 				error = 0;
3567 		}
3568 		if (error)
3569 			goto bad;
3570 	}
3571 
3572 	error = 0;
3573 
3574 	/*
3575 	 * Run the packet through pfil
3576 	 */
3577 	rv = PFIL_PASS;
3578 	switch (ether_type) {
3579 #ifdef INET
3580 	case ETHERTYPE_IP:
3581 		/*
3582 		 * Run pfil on the member interface and the bridge, both can
3583 		 * be skipped by clearing pfil_member or pfil_bridge.
3584 		 *
3585 		 * Keep the order:
3586 		 *   in_if -> bridge_if -> out_if
3587 		 */
3588 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3589 		    pfil_mbuf_out(V_inet_pfil_head, mp, bifp, NULL)) !=
3590 		    PFIL_PASS)
3591 			break;
3592 
3593 		if (V_pfil_member && ifp != NULL) {
3594 			rv = (dir == PFIL_OUT) ?
3595 			    pfil_mbuf_out(V_inet_pfil_head, mp, ifp, NULL) :
3596 			    pfil_mbuf_in(V_inet_pfil_head, mp, ifp, NULL);
3597 			if (rv != PFIL_PASS)
3598 				break;
3599 		}
3600 
3601 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3602 		    pfil_mbuf_in(V_inet_pfil_head, mp, bifp, NULL)) !=
3603 		    PFIL_PASS)
3604 			break;
3605 
3606 		/* check if we need to fragment the packet */
3607 		/* bridge_fragment generates a mbuf chain of packets */
3608 		/* that already include eth headers */
3609 		if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
3610 			i = (*mp)->m_pkthdr.len;
3611 			if (i > ifp->if_mtu) {
3612 				error = bridge_fragment(ifp, mp, &eh2, snap,
3613 					    &llc1);
3614 				return (error);
3615 			}
3616 		}
3617 
3618 		/* Recalculate the ip checksum. */
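		/*
		 * A pfil hook may have rewritten the header (e.g. NAT), and
		 * bridged packets are transmitted without passing through
		 * ip_output(), so the header checksum is refreshed here.
		 */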
3619 		ip = mtod(*mp, struct ip *);
3620 		hlen = ip->ip_hl << 2;
3621 		if (hlen < sizeof(struct ip))
3622 			goto bad;
3623 		if (hlen > (*mp)->m_len) {
3624 			if ((*mp = m_pullup(*mp, hlen)) == NULL)
3625 				goto bad;
3626 			ip = mtod(*mp, struct ip *);
3627 			if (ip == NULL)
3628 				goto bad;
3629 		}
3630 		ip->ip_sum = 0;
3631 		if (hlen == sizeof(struct ip))
3632 			ip->ip_sum = in_cksum_hdr(ip);
3633 		else
3634 			ip->ip_sum = in_cksum(*mp, hlen);
3635 
3636 		break;
3637 #endif /* INET */
3638 #ifdef INET6
3639 	case ETHERTYPE_IPV6:
3640 		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
3641 		    pfil_mbuf_out(V_inet6_pfil_head, mp, bifp, NULL)) !=
3642 		    PFIL_PASS)
3643 			break;
3644 
3645 		if (V_pfil_member && ifp != NULL) {
3646 			rv = (dir == PFIL_OUT) ?
3647 			    pfil_mbuf_out(V_inet6_pfil_head, mp, ifp, NULL) :
3648 			    pfil_mbuf_in(V_inet6_pfil_head, mp, ifp, NULL);
3649 			if (rv != PFIL_PASS)
3650 				break;
3651 		}
3652 
3653 		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
3654 		    pfil_mbuf_in(V_inet6_pfil_head, mp, bifp, NULL)) !=
3655 		    PFIL_PASS)
3656 			break;
3657 		break;
3658 #endif
3659 	}
3660 
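	/*
	 * PFIL_CONSUMED means a hook took ownership of the packet (for
	 * example, to queue it elsewhere); PFIL_DROPPED means it was
	 * dropped.  In either case the mbuf is no longer ours to forward.
	 */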
3661 	switch (rv) {
3662 	case PFIL_CONSUMED:
3663 		return (0);
3664 	case PFIL_DROPPED:
3665 		return (EACCES);
3666 	default:
3667 		break;
3668 	}
3669 
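	/*
	 * If either M_PREPEND below fails it frees the chain and leaves
	 * *mp NULL, in which case the error set here is returned.
	 */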
3670 	error = -1;
3671 
3672 	/*
3673 	 * Finally, put everything back the way it was and return
3674 	 */
3675 	if (snap) {
3676 		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
3677 		if (*mp == NULL)
3678 			return (error);
3679 		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
3680 	}
3681 
3682 	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
3683 	if (*mp == NULL)
3684 		return (error);
3685 	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
3686 
3687 	return (0);
3688 
3689 bad:
3690 	m_freem(*mp);
3691 	*mp = NULL;
3692 	return (error);
3693 }
3694 
3695 #ifdef INET
3696 /*
3697  * Perform basic checks on the IP header, since
3698  * pfil(9) assumes ip_input() has already done so.
3699  * Cut-and-pasted from ip_input.c.
3700  * Given how simple the IPv6 version is,
3701  * does the IPv4 version really need to be
3702  * this complicated?
3703  *
3704  * XXX Should we update ipstat here, or not?
3705  * XXX Right now we update ipstat but not
3706  * XXX csum_counter.
3707  */
3708 static int
3709 bridge_ip_checkbasic(struct mbuf **mp)
3710 {
3711 	struct mbuf *m = *mp;
3712 	struct ip *ip;
3713 	int len, hlen;
3714 	u_short sum;
3715 
3716 	if (*mp == NULL)
3717 		return (-1);
3718 
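	/*
	 * On strict-alignment architectures the IP header must be 32-bit
	 * aligned before it can be dereferenced; m_copyup() copies it into
	 * a new mbuf, leaving space in front for link-layer headers in
	 * case the packet is forwarded.
	 */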
3719 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3720 		if ((m = m_copyup(m, sizeof(struct ip),
3721 			(max_linkhdr + 3) & ~3)) == NULL) {
3722 			/* XXXJRT new stat, please */
3723 			KMOD_IPSTAT_INC(ips_toosmall);
3724 			goto bad;
3725 		}
3726 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3727 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3728 			KMOD_IPSTAT_INC(ips_toosmall);
3729 			goto bad;
3730 		}
3731 	}
3732 	ip = mtod(m, struct ip *);
3733 	if (ip == NULL)
		goto bad;
3734 
3735 	if (ip->ip_v != IPVERSION) {
3736 		KMOD_IPSTAT_INC(ips_badvers);
3737 		goto bad;
3738 	}
3739 	hlen = ip->ip_hl << 2;
3740 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3741 		KMOD_IPSTAT_INC(ips_badhlen);
3742 		goto bad;
3743 	}
3744 	if (hlen > m->m_len) {
3745 		if ((m = m_pullup(m, hlen)) == NULL) {
3746 			KMOD_IPSTAT_INC(ips_badhlen);
3747 			goto bad;
3748 		}
3749 		ip = mtod(m, struct ip *);
3750 		if (ip == NULL)
			goto bad;
3751 	}
3752 
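	/*
	 * If the receiving interface already verified the header checksum
	 * in hardware (CSUM_IP_CHECKED), trust its CSUM_IP_VALID result
	 * instead of recomputing the checksum in software.
	 */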
3753 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3754 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3755 	} else {
3756 		if (hlen == sizeof(struct ip)) {
3757 			sum = in_cksum_hdr(ip);
3758 		} else {
3759 			sum = in_cksum(m, hlen);
3760 		}
3761 	}
3762 	if (sum) {
3763 		KMOD_IPSTAT_INC(ips_badsum);
3764 		goto bad;
3765 	}
3766 
3767 	/* Retrieve the packet length. */
3768 	len = ntohs(ip->ip_len);
3769 
3770 	/*
3771 	 * Check for additional length bogosity
3772 	 */
3773 	if (len < hlen) {
3774 		KMOD_IPSTAT_INC(ips_badlen);
3775 		goto bad;
3776 	}
3777 
3778 	/*
3779 	 * Check that the amount of data in the buffers
3780 	 * is at least as much as the IP header would have us expect.
3781 	 * Drop the packet if it is shorter than that.
3782 	 */
3783 	if (m->m_pkthdr.len < len) {
3784 		KMOD_IPSTAT_INC(ips_tooshort);
3785 		goto bad;
3786 	}
3787 
3788 	/* Checks out, proceed */
3789 	*mp = m;
3790 	return (0);
3791 
3792 bad:
3793 	*mp = m;
3794 	return (-1);
3795 }
3796 #endif /* INET */
3797 
3798 #ifdef INET6
3799 /*
3800  * Same as above, but for IPv6.
3801  * Cut-and-pasted from ip6_input.c.
3802  * XXX Should we update ip6stat, or not?
3803  */
3804 static int
3805 bridge_ip6_checkbasic(struct mbuf **mp)
3806 {
3807 	struct mbuf *m = *mp;
3808 	struct ip6_hdr *ip6;
3809 
3810 	/*
3811 	 * If the IPv6 header is not aligned, slurp it up into a new
3812 	 * mbuf with space for link headers, in the event we forward
3813 	 * it.  Otherwise, if it is aligned, make sure the entire base
3814 	 * IPv6 header is in the first mbuf of the chain.
3815 	 */
3816 	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3817 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3818 		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
3819 			    (max_linkhdr + 3) & ~3)) == NULL) {
3820 			/* XXXJRT new stat, please */
3821 			IP6STAT_INC(ip6s_toosmall);
3822 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3823 			goto bad;
3824 		}
3825 	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
3826 		struct ifnet *inifp = m->m_pkthdr.rcvif;
3827 		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
3828 			IP6STAT_INC(ip6s_toosmall);
3829 			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
3830 			goto bad;
3831 		}
3832 	}
3833 
3834 	ip6 = mtod(m, struct ip6_hdr *);
3835 
3836 	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
3837 		IP6STAT_INC(ip6s_badvers);
3838 		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
3839 		goto bad;
3840 	}
3841 
3842 	/* Checks out, proceed */
3843 	*mp = m;
3844 	return (0);
3845 
3846 bad:
3847 	*mp = m;
3848 	return (-1);
3849 }
3850 #endif /* INET6 */
3851 
3852 #ifdef INET
3853 /*
3854  * bridge_fragment:
3855  *
3856  *	Fragment the mbuf chain into multiple packets and prepend Ethernet headers.
3857  */
3858 static int
3859 bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
3860     int snap, struct llc *llc)
3861 {
3862 	struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
3863 	struct ip *ip;
3864 	int error = -1;
3865 
3866 	if (m->m_len < sizeof(struct ip) &&
3867 	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
3868 		goto dropit;
3869 	ip = mtod(m, struct ip *);
3870 
3871 	m->m_pkthdr.csum_flags |= CSUM_IP;
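	/*
	 * ip_fragment() splits the packet into an m_nextpkt-linked chain
	 * of IP fragments; setting CSUM_IP allows the header checksum to
	 * be deferred to hardware where ifp->if_hwassist permits.
	 */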
3872 	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
3873 	if (error)
3874 		goto dropit;
3875 
3876 	/*
3877 	 * Walk the chain and re-add the Ethernet header for
3878 	 * each mbuf packet.
3879 	 */
3880 	for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
3881 		nextpkt = mcur->m_nextpkt;
3882 		mcur->m_nextpkt = NULL;
3883 		if (snap) {
3884 			M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
3885 			if (mcur == NULL) {
3886 				error = ENOBUFS;
3887 				if (mprev != NULL)
3888 					mprev->m_nextpkt = nextpkt;
3889 				goto dropit;
3890 			}
3891 			bcopy(llc, mtod(mcur, caddr_t),sizeof(struct llc));
3892 		}
3893 
3894 		M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
3895 		if (mcur == NULL) {
3896 			error = ENOBUFS;
3897 			if (mprev != NULL)
3898 				mprev->m_nextpkt = nextpkt;
3899 			goto dropit;
3900 		}
3901 		bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);
3902 
3903 		/*
3904 		 * The two M_PREPEND calls above could have inserted one or
3905 		 * two mbufs in front, so we have to update the previous
3906 		 * packet's m_nextpkt pointer.
3907 		 */
3908 		mcur->m_nextpkt = nextpkt;
3909 		if (mprev != NULL)
3910 			mprev->m_nextpkt = mcur;
3911 		else {
3912 			/* The first mbuf in the original chain needs to be
3913 			 * updated. */
3914 			*mp = mcur;
3915 		}
3916 		mprev = mcur;
3917 	}
3918 
3919 	KMOD_IPSTAT_INC(ips_fragmented);
3920 	return (error);
3921 
3922 dropit:
3923 	for (mcur = *mp; mcur; mcur = m) { /* dropping the full packet chain */
3924 		m = mcur->m_nextpkt;
3925 		m_freem(mcur);
3926 	}
3927 	return (error);
3928 }
3929 #endif /* INET */
3930 
3931 static void
3932 bridge_linkstate(struct ifnet *ifp)
3933 {
3934 	struct bridge_softc *sc = NULL;
3935 	struct bridge_iflist *bif;
3936 	struct epoch_tracker et;
3937 
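	/*
	 * Enter the network epoch so that the bif and softc pointers read
	 * below remain valid even if the interface is concurrently being
	 * removed from the bridge.
	 */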
3938 	NET_EPOCH_ENTER(et);
3939 
3940 	bif = ifp->if_bridge;
3941 	if (bif)
3942 		sc = bif->bif_sc;
3943 
3944 	if (sc != NULL) {
3945 		bridge_linkcheck(sc);
3946 		bstp_linkstate(&bif->bif_stp);
3947 	}
3948 
3949 	NET_EPOCH_EXIT(et);
3950 }
3951 
3952 static void
3953 bridge_linkcheck(struct bridge_softc *sc)
3954 {
3955 	struct bridge_iflist *bif;
3956 	int new_link, hasls;
3957 
3958 	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
3959 
3960 	new_link = LINK_STATE_DOWN;
3961 	hasls = 0;
3962 	/* Our link is considered up if at least one of our ports is active */
3963 	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
3964 		if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
3965 			hasls++;
3966 		if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
3967 			new_link = LINK_STATE_UP;
3968 			break;
3969 		}
3970 	}
3971 	if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
3972 		/* If no interfaces support link-state then we default to up */
3973 		new_link = LINK_STATE_UP;
3974 	}
3975 	if_link_state_change(sc->sc_ifp, new_link);
3976 }
3977