1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-4-Clause
5 *
6 * Copyright 2001 Wasabi Systems, Inc.
7 * All rights reserved.
8 *
9 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed for the NetBSD Project by
22 * Wasabi Systems, Inc.
23 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
24 * or promote products derived from this software without specific prior
25 * written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
55 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
56 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
61 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
62 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 *
65 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
66 */
67
68 /*
69 * Network interface bridge support.
70 *
71 * TODO:
72 *
73 * - Currently only supports Ethernet-like interfaces (Ethernet,
74 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
75 * to bridge other types of interfaces (maybe consider
76 * heterogeneous bridges).
77 */
78
79 #include "opt_inet.h"
80 #include "opt_inet6.h"
81
82 #define EXTERR_CATEGORY EXTERR_CAT_BRIDGE
83
84 #include <sys/param.h>
85 #include <sys/ctype.h> /* string functions */
86 #include <sys/eventhandler.h>
87 #include <sys/exterrvar.h>
88 #include <sys/jail.h>
89 #include <sys/kernel.h>
90 #include <sys/lock.h>
91 #include <sys/malloc.h>
92 #include <sys/mbuf.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/priv.h>
96 #include <sys/proc.h>
97 #include <sys/protosw.h>
98 #include <sys/random.h>
99 #include <sys/systm.h>
100 #include <sys/socket.h> /* for net/if.h */
101 #include <sys/sockio.h>
102 #include <sys/syslog.h>
103 #include <sys/sysctl.h>
104 #include <sys/time.h>
105
106 #include <vm/uma.h>
107
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_clone.h>
111 #include <net/if_dl.h>
112 #include <net/if_types.h>
113 #include <net/if_var.h>
114 #include <net/if_private.h>
115 #include <net/pfil.h>
116 #include <net/vnet.h>
117
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/in_var.h>
121 #include <netinet/ip.h>
122 #include <netinet/ip_var.h>
123 #ifdef INET6
124 #include <netinet/ip6.h>
125 #include <netinet6/ip6_var.h>
126 #include <netinet6/in6_ifattach.h>
127 #endif
128 #if defined(INET) || defined(INET6)
129 #include <netinet/ip_carp.h>
130 #endif
131 #include <machine/in_cksum.h>
132 #include <netinet/if_ether.h>
133 #include <net/bridgestp.h>
134 #include <net/if_bridgevar.h>
135 #include <net/if_llc.h>
136 #include <net/if_vlan_var.h>
137
138 #include <net/route.h>
139
140 /*
141 * At various points in the code we need to know if we're hooked into the INET
142 * and/or INET6 pfil. Define some macros to do that based on which IP versions
143 * are enabled in the kernel. This avoids littering the rest of the code with
144  * #ifdef INET6 checks just to avoid referencing V_inet6_pfil_head.
145 */
146 #ifdef INET6
147 #define PFIL_HOOKED_IN_INET6 PFIL_HOOKED_IN(V_inet6_pfil_head)
148 #define PFIL_HOOKED_OUT_INET6 PFIL_HOOKED_OUT(V_inet6_pfil_head)
149 #else
150 #define PFIL_HOOKED_IN_INET6 false
151 #define PFIL_HOOKED_OUT_INET6 false
152 #endif
153
154 #ifdef INET
155 #define PFIL_HOOKED_IN_INET PFIL_HOOKED_IN(V_inet_pfil_head)
156 #define PFIL_HOOKED_OUT_INET PFIL_HOOKED_OUT(V_inet_pfil_head)
157 #else
158 #define PFIL_HOOKED_IN_INET false
159 #define PFIL_HOOKED_OUT_INET false
160 #endif
161
162 #define PFIL_HOOKED_IN_46 (PFIL_HOOKED_IN_INET6 || PFIL_HOOKED_IN_INET)
163 #define PFIL_HOOKED_OUT_46 (PFIL_HOOKED_OUT_INET6 || PFIL_HOOKED_OUT_INET)
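/*
 * Usage sketch (illustrative only, not part of the driver): these macros let
 * callers guard pfil work without #ifdef clutter, e.g.
 *
 *	if (PFIL_HOOKED_IN_46)
 *		;	// run the appropriate input pfil chain on the mbuf
 */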
164
165 /*
166 * Size of the route hash table. Must be a power of two.
167 */
168 #ifndef BRIDGE_RTHASH_SIZE
169 #define BRIDGE_RTHASH_SIZE 1024
170 #endif
171
172 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
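/*
 * Example (sketch): because the table size is a power of two, a bucket index
 * for any hash value is a single mask operation, e.g.
 *
 *	idx = hash & BRIDGE_RTHASH_MASK;	// always 0 .. BRIDGE_RTHASH_SIZE - 1
 *
 * which is why BRIDGE_RTHASH_SIZE must not be an arbitrary value.
 */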
173
174 /*
175 * Default maximum number of addresses to cache.
176 */
177 #ifndef BRIDGE_RTABLE_MAX
178 #define BRIDGE_RTABLE_MAX 2000
179 #endif
180
181 /*
182 * Timeout (in seconds) for entries learned dynamically.
183 */
184 #ifndef BRIDGE_RTABLE_TIMEOUT
185 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
186 #endif
187
188 /*
189 * Number of seconds between walks of the route list.
190 */
191 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
192 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
193 #endif
194
195 /*
196 * List of capabilities to possibly mask on the member interface.
197 */
198 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
199 IFCAP_TXCSUM_IPV6|IFCAP_MEXTPG)
200
201 /*
202 * List of capabilities to strip
203 */
204 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO
205
206 /*
207 * Bridge locking
208 *
209 * The bridge relies heavily on the epoch(9) system to protect its data
210 * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
211 * must ensure there is only one writer at a time.
212 *
213 * That is: for read accesses we only need to be in NET_EPOCH, but for write
214 * accesses we must hold:
215 *
216 * - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
217 * - BRIDGE_LOCK, for any other change
218 *
219 * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
220 * calls to bridge member interfaces and these ioctl()s can sleep.
221 * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
222 * required while we're in NET_EPOCH and then we're not allowed to sleep.
223 */
224 #define BRIDGE_LOCK_INIT(_sc) do { \
225 sx_init(&(_sc)->sc_sx, "if_bridge"); \
226 mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF); \
227 } while (0)
228 #define BRIDGE_LOCK_DESTROY(_sc) do { \
229 sx_destroy(&(_sc)->sc_sx); \
230 mtx_destroy(&(_sc)->sc_rt_mtx); \
231 } while (0)
232 #define BRIDGE_LOCK(_sc) sx_xlock(&(_sc)->sc_sx)
233 #define BRIDGE_UNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx)
234 #define BRIDGE_LOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
235 #define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
236 MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
237 #define BRIDGE_UNLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
238 #define BRIDGE_RT_LOCK(_sc) mtx_lock(&(_sc)->sc_rt_mtx)
239 #define BRIDGE_RT_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_rt_mtx)
240 #define BRIDGE_RT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
241 #define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
242 MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
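/*
 * Locking usage sketch (illustrative): readers only need to be inside the
 * network epoch, while writers take the lock matching what they modify.
 *
 *	// read path (e.g. forwarding lookups)
 *	NET_EPOCH_ENTER(et);
 *	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
 *		...;
 *	NET_EPOCH_EXIT(et);
 *
 *	// write path for configuration / member list changes
 *	BRIDGE_LOCK(sc);
 *	...;
 *	BRIDGE_UNLOCK(sc);
 *
 *	// write path for the forwarding table (bridge_rtnodes)
 *	BRIDGE_RT_LOCK(sc);
 *	...;
 *	BRIDGE_RT_UNLOCK(sc);
 */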
243
244 struct bridge_softc;
245
246 /*
247 * Bridge interface list entry.
248 */
249 struct bridge_iflist {
250 CK_LIST_ENTRY(bridge_iflist) bif_next;
251 struct ifnet *bif_ifp; /* member if */
252 struct bridge_softc *bif_sc; /* parent bridge */
253 struct bstp_port bif_stp; /* STP state */
254 uint32_t bif_flags; /* member if flags */
255 int bif_savedcaps; /* saved capabilities */
256 uint32_t bif_addrmax; /* max # of addresses */
257 uint32_t bif_addrcnt; /* cur. # of addresses */
258 uint32_t bif_addrexceeded;/* # of address violations */
259 struct epoch_context bif_epoch_ctx;
260 ether_vlanid_t bif_pvid; /* port vlan id */
261 ifbvlan_set_t bif_vlan_set; /* if allowed tagged vlans */
262 };
263
264 /*
265 * Bridge route node.
266 */
267 struct bridge_rtnode {
268 CK_LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
269 CK_LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
270 struct bridge_iflist *brt_dst; /* destination if */
271 unsigned long brt_expire; /* expiration time */
272 uint8_t brt_flags; /* address flags */
273 uint8_t brt_addr[ETHER_ADDR_LEN];
274 ether_vlanid_t brt_vlan; /* vlan id */
275 struct vnet *brt_vnet;
276 struct epoch_context brt_epoch_ctx;
277 };
278 #define brt_ifp brt_dst->bif_ifp
279
280 /*
281 * Software state for each bridge.
282 */
283 struct bridge_softc {
284 struct ifnet *sc_ifp; /* make this an interface */
285 LIST_ENTRY(bridge_softc) sc_list;
286 struct sx sc_sx;
287 struct mtx sc_rt_mtx;
288 uint32_t sc_brtmax; /* max # of addresses */
289 uint32_t sc_brtcnt; /* cur. # of addresses */
290 uint32_t sc_brttimeout; /* rt timeout in seconds */
291 struct callout sc_brcallout; /* bridge callout */
292 CK_LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
293 CK_LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */
294 CK_LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */
295 uint32_t sc_rthash_key; /* key for hash */
296 CK_LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */
297 struct bstp_state sc_stp; /* STP state */
298 uint32_t sc_brtexceeded; /* # of cache drops */
299 struct ifnet *sc_ifaddr; /* member mac copied from */
300 struct ether_addr sc_defaddr; /* Default MAC address */
301 if_input_fn_t sc_if_input; /* Saved copy of if_input */
302 struct epoch_context sc_epoch_ctx;
303 ifbr_flags_t sc_flags; /* bridge flags */
304 ether_vlanid_t sc_defpvid; /* default PVID */
305 };
306
307 VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
308 #define V_bridge_list_sx VNET(bridge_list_sx)
309 static eventhandler_tag bridge_detach_cookie;
310
311 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
312
313 VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
314 #define V_bridge_rtnode_zone VNET(bridge_rtnode_zone)
315
316 static int bridge_clone_create(struct if_clone *, char *, size_t,
317 struct ifc_data *, struct ifnet **);
318 static int bridge_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
319
320 static int bridge_ioctl(struct ifnet *, u_long, caddr_t);
321 static void bridge_mutecaps(struct bridge_softc *);
322 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
323 int);
324 static void bridge_ifdetach(void *arg __unused, struct ifnet *);
325 static void bridge_init(void *);
326 static void bridge_dummynet(struct mbuf *, struct ifnet *);
327 static bool bridge_same(const void *, const void *);
328 static void *bridge_get_softc(struct ifnet *);
329 static void bridge_stop(struct ifnet *, int);
330 static int bridge_transmit(struct ifnet *, struct mbuf *);
331 #ifdef ALTQ
332 static void bridge_altq_start(if_t);
333 static int bridge_altq_transmit(if_t, struct mbuf *);
334 #endif
335 static void bridge_qflush(struct ifnet *);
336 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
337 static void bridge_inject(struct ifnet *, struct mbuf *);
338 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
339 struct rtentry *);
340 static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
341 struct mbuf *, struct bridge_iflist *);
342 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
343
344 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
345 struct mbuf *m);
346 static bool bridge_member_ifaddrs(void);
347 static void bridge_timer(void *);
348
349 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
350 struct mbuf *, int);
351 static void bridge_span(struct bridge_softc *, struct mbuf *);
352
353 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
354 ether_vlanid_t, struct bridge_iflist *, int, uint8_t);
355 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
356 ether_vlanid_t);
357 static void bridge_rttrim(struct bridge_softc *);
358 static void bridge_rtage(struct bridge_softc *);
359 static void bridge_rtflush(struct bridge_softc *, int);
360 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
361 ether_vlanid_t);
362 static bool bridge_vfilter_in(const struct bridge_iflist *, struct mbuf *);
363 static bool bridge_vfilter_out(const struct bridge_iflist *,
364 const struct mbuf *);
365
366 static void bridge_rtable_init(struct bridge_softc *);
367 static void bridge_rtable_fini(struct bridge_softc *);
368
369 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
370 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
371 const uint8_t *, ether_vlanid_t);
372 static int bridge_rtnode_insert(struct bridge_softc *,
373 struct bridge_rtnode *);
374 static void bridge_rtnode_destroy(struct bridge_softc *,
375 struct bridge_rtnode *);
376 static void bridge_rtable_expire(struct ifnet *, int);
377 static void bridge_state_change(struct ifnet *, int);
378
379 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
380 const char *name);
381 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
382 struct ifnet *ifp);
383 static void bridge_delete_member(struct bridge_softc *,
384 struct bridge_iflist *, int);
385 static void bridge_delete_span(struct bridge_softc *,
386 struct bridge_iflist *);
387
388 static int bridge_ioctl_add(struct bridge_softc *, void *);
389 static int bridge_ioctl_del(struct bridge_softc *, void *);
390 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
391 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
392 static int bridge_ioctl_scache(struct bridge_softc *, void *);
393 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
394 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
395 static int bridge_ioctl_rts(struct bridge_softc *, void *);
396 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
397 static int bridge_ioctl_sto(struct bridge_softc *, void *);
398 static int bridge_ioctl_gto(struct bridge_softc *, void *);
399 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
400 static int bridge_ioctl_flush(struct bridge_softc *, void *);
401 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
402 static int bridge_ioctl_spri(struct bridge_softc *, void *);
403 static int bridge_ioctl_ght(struct bridge_softc *, void *);
404 static int bridge_ioctl_sht(struct bridge_softc *, void *);
405 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
406 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
407 static int bridge_ioctl_gma(struct bridge_softc *, void *);
408 static int bridge_ioctl_sma(struct bridge_softc *, void *);
409 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
410 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
411 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
412 static int bridge_ioctl_sifpvid(struct bridge_softc *, void *);
413 static int bridge_ioctl_sifvlanset(struct bridge_softc *, void *);
414 static int bridge_ioctl_gifvlanset(struct bridge_softc *, void *);
415 static int bridge_ioctl_addspan(struct bridge_softc *, void *);
416 static int bridge_ioctl_delspan(struct bridge_softc *, void *);
417 static int bridge_ioctl_gbparam(struct bridge_softc *, void *);
418 static int bridge_ioctl_grte(struct bridge_softc *, void *);
419 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *);
420 static int bridge_ioctl_sproto(struct bridge_softc *, void *);
421 static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
422 static int bridge_ioctl_gflags(struct bridge_softc *, void *);
423 static int bridge_ioctl_sflags(struct bridge_softc *, void *);
424 static int bridge_ioctl_gdefpvid(struct bridge_softc *, void *);
425 static int bridge_ioctl_sdefpvid(struct bridge_softc *, void *);
426 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
427 int);
428 #ifdef INET
429 static int bridge_ip_checkbasic(struct mbuf **mp);
430 static int bridge_fragment(struct ifnet *, struct mbuf **mp,
431 struct ether_header *, int, struct llc *);
432 #endif /* INET */
433 #ifdef INET6
434 static int bridge_ip6_checkbasic(struct mbuf **mp);
435 #endif /* INET6 */
436 static void bridge_linkstate(struct ifnet *ifp);
437 static void bridge_linkcheck(struct bridge_softc *sc);
438
439 /*
440 * Use the "null" value from IEEE 802.1Q-2014 Table 9-2
441 * to indicate untagged frames.
442 */
443 #define VLANTAGOF(_m) \
444 ((_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : DOT1Q_VID_NULL)
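/*
 * Example (sketch): VLANTAGOF() is the idiom used in this file to ask which
 * VLAN a frame belongs to, e.g.
 *
 *	ether_vlanid_t vlan = VLANTAGOF(m);
 *	if (vlan == DOT1Q_VID_NULL)
 *		;	// frame arrived untagged
 */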
445
446 static struct bstp_cb_ops bridge_ops = {
447 .bcb_state = bridge_state_change,
448 .bcb_rtage = bridge_rtable_expire
449 };
450
451 SYSCTL_DECL(_net_link);
452 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
453 "Bridge");
454
455 /* only pass IP[46] packets when pfil is enabled */
456 VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
457 #define V_pfil_onlyip VNET(pfil_onlyip)
458 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
459 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
460 "Only pass IP packets when pfil is enabled");
461
462 /* run pfil hooks on the bridge interface */
463 VNET_DEFINE_STATIC(int, pfil_bridge) = 0;
464 #define V_pfil_bridge VNET(pfil_bridge)
465 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
466 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
467 "Packet filter on the bridge interface");
468
469 /* layer2 filter with ipfw */
470 VNET_DEFINE_STATIC(int, pfil_ipfw);
471 #define V_pfil_ipfw VNET(pfil_ipfw)
472
473 /* layer2 ARP filter with ipfw */
474 VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
475 #define V_pfil_ipfw_arp VNET(pfil_ipfw_arp)
476 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
477 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
478 "Filter ARP packets through IPFW layer2");
479
480 /* run pfil hooks on the member interface */
481 VNET_DEFINE_STATIC(int, pfil_member) = 0;
482 #define V_pfil_member VNET(pfil_member)
483 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
484 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
485 "Packet filter on the member interface");
486
487 /* run pfil hooks on the physical interface for locally destined packets */
488 VNET_DEFINE_STATIC(int, pfil_local_phys);
489 #define V_pfil_local_phys VNET(pfil_local_phys)
490 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
491 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
492 "Packet filter on the physical interface for locally destined packets");
493
494 /* log STP state changes */
495 VNET_DEFINE_STATIC(int, log_stp);
496 #define V_log_stp VNET(log_stp)
497 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
498 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
499 "Log STP state changes");
500
501 /* share MAC with first bridge member */
502 VNET_DEFINE_STATIC(int, bridge_inherit_mac);
503 #define V_bridge_inherit_mac VNET(bridge_inherit_mac)
504 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
505 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
506 "Inherit MAC address from the first bridge member");
507
508 VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
509 #define V_allow_llz_overlap VNET(allow_llz_overlap)
510 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
511 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
512 "Allow overlap of link-local scope "
513 "zones of a bridge interface and the member interfaces");
514
515 /* log MAC address port flapping */
516 VNET_DEFINE_STATIC(bool, log_mac_flap) = true;
517 #define V_log_mac_flap VNET(log_mac_flap)
518 SYSCTL_BOOL(_net_link_bridge, OID_AUTO, log_mac_flap,
519 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(log_mac_flap), true,
520 "Log MAC address port flapping");
521
522 /* allow IP addresses on bridge members */
523 VNET_DEFINE_STATIC(bool, member_ifaddrs) = false;
524 #define V_member_ifaddrs VNET(member_ifaddrs)
525 SYSCTL_BOOL(_net_link_bridge, OID_AUTO, member_ifaddrs,
526 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(member_ifaddrs), false,
527 "Allow layer 3 addresses on bridge members");
528
529 static bool
530 bridge_member_ifaddrs(void)
531 {
532 return (V_member_ifaddrs);
533 }
534
535 VNET_DEFINE_STATIC(int, log_interval) = 5;
536 VNET_DEFINE_STATIC(int, log_count) = 0;
537 VNET_DEFINE_STATIC(struct timeval, log_last) = { 0 };
538
539 #define V_log_interval VNET(log_interval)
540 #define V_log_count VNET(log_count)
541 #define V_log_last VNET(log_last)
542
543 struct bridge_control {
544 int (*bc_func)(struct bridge_softc *, void *);
545 int bc_argsize;
546 int bc_flags;
547 };
548
549 #define BC_F_COPYIN 0x01 /* copy arguments in */
550 #define BC_F_COPYOUT 0x02 /* copy arguments out */
551 #define BC_F_SUSER 0x04 /* do super-user check */
552
553 static const struct bridge_control bridge_control_table[] = {
554 { bridge_ioctl_add, sizeof(struct ifbreq),
555 BC_F_COPYIN|BC_F_SUSER },
556 { bridge_ioctl_del, sizeof(struct ifbreq),
557 BC_F_COPYIN|BC_F_SUSER },
558
559 { bridge_ioctl_gifflags, sizeof(struct ifbreq),
560 BC_F_COPYIN|BC_F_COPYOUT },
561 { bridge_ioctl_sifflags, sizeof(struct ifbreq),
562 BC_F_COPYIN|BC_F_SUSER },
563
564 { bridge_ioctl_scache, sizeof(struct ifbrparam),
565 BC_F_COPYIN|BC_F_SUSER },
566 { bridge_ioctl_gcache, sizeof(struct ifbrparam),
567 BC_F_COPYOUT },
568
569 { bridge_ioctl_gifs, sizeof(struct ifbifconf),
570 BC_F_COPYIN|BC_F_COPYOUT },
571 { bridge_ioctl_rts, sizeof(struct ifbaconf),
572 BC_F_COPYIN|BC_F_COPYOUT },
573
574 { bridge_ioctl_saddr, sizeof(struct ifbareq),
575 BC_F_COPYIN|BC_F_SUSER },
576
577 { bridge_ioctl_sto, sizeof(struct ifbrparam),
578 BC_F_COPYIN|BC_F_SUSER },
579 { bridge_ioctl_gto, sizeof(struct ifbrparam),
580 BC_F_COPYOUT },
581
582 { bridge_ioctl_daddr, sizeof(struct ifbareq),
583 BC_F_COPYIN|BC_F_SUSER },
584
585 { bridge_ioctl_flush, sizeof(struct ifbreq),
586 BC_F_COPYIN|BC_F_SUSER },
587
588 { bridge_ioctl_gpri, sizeof(struct ifbrparam),
589 BC_F_COPYOUT },
590 { bridge_ioctl_spri, sizeof(struct ifbrparam),
591 BC_F_COPYIN|BC_F_SUSER },
592
593 { bridge_ioctl_ght, sizeof(struct ifbrparam),
594 BC_F_COPYOUT },
595 { bridge_ioctl_sht, sizeof(struct ifbrparam),
596 BC_F_COPYIN|BC_F_SUSER },
597
598 { bridge_ioctl_gfd, sizeof(struct ifbrparam),
599 BC_F_COPYOUT },
600 { bridge_ioctl_sfd, sizeof(struct ifbrparam),
601 BC_F_COPYIN|BC_F_SUSER },
602
603 { bridge_ioctl_gma, sizeof(struct ifbrparam),
604 BC_F_COPYOUT },
605 { bridge_ioctl_sma, sizeof(struct ifbrparam),
606 BC_F_COPYIN|BC_F_SUSER },
607
608 { bridge_ioctl_sifprio, sizeof(struct ifbreq),
609 BC_F_COPYIN|BC_F_SUSER },
610
611 { bridge_ioctl_sifcost, sizeof(struct ifbreq),
612 BC_F_COPYIN|BC_F_SUSER },
613
614 { bridge_ioctl_addspan, sizeof(struct ifbreq),
615 BC_F_COPYIN|BC_F_SUSER },
616 { bridge_ioctl_delspan, sizeof(struct ifbreq),
617 BC_F_COPYIN|BC_F_SUSER },
618
619 { bridge_ioctl_gbparam, sizeof(struct ifbropreq),
620 BC_F_COPYOUT },
621
622 { bridge_ioctl_grte, sizeof(struct ifbrparam),
623 BC_F_COPYOUT },
624
625 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf),
626 BC_F_COPYIN|BC_F_COPYOUT },
627
628 { bridge_ioctl_sproto, sizeof(struct ifbrparam),
629 BC_F_COPYIN|BC_F_SUSER },
630
631 { bridge_ioctl_stxhc, sizeof(struct ifbrparam),
632 BC_F_COPYIN|BC_F_SUSER },
633
634 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
635 BC_F_COPYIN|BC_F_SUSER },
636
637 { bridge_ioctl_sifpvid, sizeof(struct ifbreq),
638 BC_F_COPYIN|BC_F_SUSER },
639
640 { bridge_ioctl_sifvlanset, sizeof(struct ifbif_vlan_req),
641 BC_F_COPYIN|BC_F_SUSER },
642
643 { bridge_ioctl_gifvlanset, sizeof(struct ifbif_vlan_req),
644 BC_F_COPYIN|BC_F_COPYOUT },
645
646 { bridge_ioctl_gflags, sizeof(struct ifbrparam),
647 BC_F_COPYOUT },
648
649 { bridge_ioctl_sflags, sizeof(struct ifbrparam),
650 BC_F_COPYIN|BC_F_SUSER },
651
652 { bridge_ioctl_gdefpvid, sizeof(struct ifbrparam),
653 BC_F_COPYOUT },
654
655 { bridge_ioctl_sdefpvid, sizeof(struct ifbrparam),
656 BC_F_COPYIN|BC_F_SUSER },
657 };
658 static const int bridge_control_table_size = nitems(bridge_control_table);
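/*
 * Dispatch sketch (illustrative): userland reaches this table through
 * SIOCSDRVSPEC/SIOCGDRVSPEC with a struct ifdrv whose ifd_cmd is an index
 * into bridge_control_table[]; the BRDG* command constants in
 * net/if_bridgevar.h are expected to mirror the table order.  Conceptually:
 *
 *	struct ifdrv ifd = {
 *		.ifd_cmd  = <BRDG* index>,
 *		.ifd_len  = bc->bc_argsize,
 *		.ifd_data = &args,
 *	};
 *	ioctl(s, SIOCSDRVSPEC, &ifd);	// SIOCGDRVSPEC for BC_F_COPYOUT cmds
 *
 * See bridge_ioctl() below for the in-kernel side of this dispatch.
 */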
659
660 VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list) =
661 LIST_HEAD_INITIALIZER();
662 #define V_bridge_list VNET(bridge_list)
663 #define BRIDGE_LIST_LOCK_INIT(x) sx_init(&V_bridge_list_sx, \
664 "if_bridge list")
665 #define BRIDGE_LIST_LOCK_DESTROY(x) sx_destroy(&V_bridge_list_sx)
666 #define BRIDGE_LIST_LOCK(x) sx_xlock(&V_bridge_list_sx)
667 #define BRIDGE_LIST_UNLOCK(x) sx_xunlock(&V_bridge_list_sx)
668
669 VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
670 #define V_bridge_cloner VNET(bridge_cloner)
671
672 static const char bridge_name[] = "bridge";
673
674 static void
675 vnet_bridge_init(const void *unused __unused)
676 {
677
678 V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
679 sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
680 UMA_ALIGN_PTR, 0);
681 BRIDGE_LIST_LOCK_INIT();
682
683 struct if_clone_addreq req = {
684 .create_f = bridge_clone_create,
685 .destroy_f = bridge_clone_destroy,
686 .flags = IFC_F_AUTOUNIT,
687 };
688 V_bridge_cloner = ifc_attach_cloner(bridge_name, &req);
689 }
690 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
691 vnet_bridge_init, NULL);
692
693 static void
694 vnet_bridge_uninit(const void *unused __unused)
695 {
696
697 ifc_detach_cloner(V_bridge_cloner);
698 V_bridge_cloner = NULL;
699 BRIDGE_LIST_LOCK_DESTROY();
700
701 /* Callbacks may use the UMA zone. */
702 NET_EPOCH_DRAIN_CALLBACKS();
703
704 uma_zdestroy(V_bridge_rtnode_zone);
705 }
706 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
707 vnet_bridge_uninit, NULL);
708
709 static int
710 bridge_modevent(module_t mod, int type, void *data)
711 {
712
713 switch (type) {
714 case MOD_LOAD:
715 bridge_dn_p = bridge_dummynet;
716 bridge_same_p = bridge_same;
717 bridge_get_softc_p = bridge_get_softc;
718 bridge_member_ifaddrs_p = bridge_member_ifaddrs;
719 bridge_detach_cookie = EVENTHANDLER_REGISTER(
720 ifnet_departure_event, bridge_ifdetach, NULL,
721 EVENTHANDLER_PRI_ANY);
722 break;
723 case MOD_UNLOAD:
724 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
725 bridge_detach_cookie);
726 bridge_dn_p = NULL;
727 bridge_same_p = NULL;
728 bridge_get_softc_p = NULL;
729 bridge_member_ifaddrs_p = NULL;
730 break;
731 default:
732 return (EOPNOTSUPP);
733 }
734 return (0);
735 }
736
737 static moduledata_t bridge_mod = {
738 "if_bridge",
739 bridge_modevent,
740 0
741 };
742
743 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
744 MODULE_VERSION(if_bridge, 1);
745 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
746
747 /*
748 * handler for net.link.bridge.ipfw
749 */
750 static int
751 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
752 {
753 int enable = V_pfil_ipfw;
754 int error;
755
756 error = sysctl_handle_int(oidp, &enable, 0, req);
757 enable &= 1;
758
759 if (enable != V_pfil_ipfw) {
760 V_pfil_ipfw = enable;
761
762 /*
763 		 * Disable pfil so that ipfw doesn't run twice; if the user
764 * really wants both then they can re-enable pfil_bridge and/or
765 * pfil_member. Also allow non-ip packets as ipfw can filter by
766 * layer2 type.
767 */
768 if (V_pfil_ipfw) {
769 V_pfil_onlyip = 0;
770 V_pfil_bridge = 0;
771 V_pfil_member = 0;
772 }
773 }
774
775 return (error);
776 }
777 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
778 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
779 &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
780 "Layer2 filter with IPFW");
781
782 #ifdef VIMAGE
783 static void
784 bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
785 {
786 struct bridge_softc *sc = ifp->if_softc;
787 struct bridge_iflist *bif;
788
789 BRIDGE_LOCK(sc);
790
791 while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
792 bridge_delete_member(sc, bif, 0);
793
794 while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
795 bridge_delete_span(sc, bif);
796 }
797
798 BRIDGE_UNLOCK(sc);
799
800 ether_reassign(ifp, newvnet, arg);
801 }
802 #endif
803
804 /*
805 * bridge_get_softc:
806 *
807 * Return the bridge softc for an ifnet.
808 */
809 static void *
810 bridge_get_softc(struct ifnet *ifp)
811 {
812 struct bridge_iflist *bif;
813
814 NET_EPOCH_ASSERT();
815
816 bif = ifp->if_bridge;
817 if (bif == NULL)
818 return (NULL);
819 return (bif->bif_sc);
820 }
821
822 /*
823 * bridge_same:
824 *
825 * Return true if two interfaces are in the same bridge. This is only used by
826 * bridgestp via bridge_same_p.
827 */
828 static bool
829 bridge_same(const void *bifap, const void *bifbp)
830 {
831 const struct bridge_iflist *bifa = bifap, *bifb = bifbp;
832
833 NET_EPOCH_ASSERT();
834
835 if (bifa == NULL || bifb == NULL)
836 return (false);
837
838 return (bifa->bif_sc == bifb->bif_sc);
839 }
840
841 /*
842 * bridge_clone_create:
843 *
844 * Create a new bridge instance.
845 */
846 static int
847 bridge_clone_create(struct if_clone *ifc, char *name, size_t len,
848 struct ifc_data *ifd, struct ifnet **ifpp)
849 {
850 struct bridge_softc *sc;
851 struct ifnet *ifp;
852
853 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
854 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
855
856 BRIDGE_LOCK_INIT(sc);
857 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
858 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
859
860 /* Initialize our routing table. */
861 bridge_rtable_init(sc);
862
863 callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
864
865 CK_LIST_INIT(&sc->sc_iflist);
866 CK_LIST_INIT(&sc->sc_spanlist);
867
868 ifp->if_softc = sc;
869 if_initname(ifp, bridge_name, ifd->unit);
870 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
871 ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_HWTAGGING;
872 ifp->if_ioctl = bridge_ioctl;
873 #ifdef ALTQ
874 ifp->if_start = bridge_altq_start;
875 ifp->if_transmit = bridge_altq_transmit;
876 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
877 ifp->if_snd.ifq_drv_maxlen = 0;
878 IFQ_SET_READY(&ifp->if_snd);
879 #else
880 ifp->if_transmit = bridge_transmit;
881 #endif
882 ifp->if_qflush = bridge_qflush;
883 ifp->if_init = bridge_init;
884 ifp->if_type = IFT_BRIDGE;
885
886 ether_gen_addr(ifp, &sc->sc_defaddr);
887
888 bstp_attach(&sc->sc_stp, &bridge_ops);
889 ether_ifattach(ifp, sc->sc_defaddr.octet);
890 /* Now undo some of the damage... */
891 ifp->if_baudrate = 0;
892 #ifdef VIMAGE
893 ifp->if_reassign = bridge_reassign;
894 #endif
895 sc->sc_if_input = ifp->if_input; /* ether_input */
896 ifp->if_input = bridge_inject;
897
898 /*
899 * Allow BRIDGE_INPUT() to pass in packets originating from the bridge
900 * itself via bridge_inject(). This is required for netmap but
901 * otherwise has no effect.
902 */
903 ifp->if_bridge_input = bridge_input;
904
905 BRIDGE_LIST_LOCK();
906 LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
907 BRIDGE_LIST_UNLOCK();
908 *ifpp = ifp;
909
910 return (0);
911 }
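/*
 * Usage example (userland; see if_bridge(4)): cloning a bridge and adding
 * members exercises bridge_clone_create() above and bridge_ioctl_add()
 * below.  The member names are examples only.
 *
 *	# ifconfig bridge0 create
 *	# ifconfig bridge0 addm em0 addm em1 up
 */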
912
913 static void
914 bridge_clone_destroy_cb(struct epoch_context *ctx)
915 {
916 struct bridge_softc *sc;
917
918 sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
919
920 BRIDGE_LOCK_DESTROY(sc);
921 free(sc, M_DEVBUF);
922 }
923
924 /*
925 * bridge_clone_destroy:
926 *
927 * Destroy a bridge instance.
928 */
929 static int
930 bridge_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
931 {
932 struct bridge_softc *sc = ifp->if_softc;
933 struct bridge_iflist *bif;
934 struct epoch_tracker et;
935
936 BRIDGE_LOCK(sc);
937
938 bridge_stop(ifp, 1);
939 ifp->if_flags &= ~IFF_UP;
940
941 while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
942 bridge_delete_member(sc, bif, 0);
943
944 while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
945 bridge_delete_span(sc, bif);
946 }
947
948 /* Tear down the routing table. */
949 bridge_rtable_fini(sc);
950
951 BRIDGE_UNLOCK(sc);
952
953 NET_EPOCH_ENTER(et);
954
955 callout_drain(&sc->sc_brcallout);
956
957 BRIDGE_LIST_LOCK();
958 LIST_REMOVE(sc, sc_list);
959 BRIDGE_LIST_UNLOCK();
960
961 bstp_detach(&sc->sc_stp);
962 #ifdef ALTQ
963 IFQ_PURGE(&ifp->if_snd);
964 #endif
965 NET_EPOCH_EXIT(et);
966
967 ether_ifdetach(ifp);
968 if_free(ifp);
969
970 NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
971
972 return (0);
973 }
974
975 /*
976 * bridge_ioctl:
977 *
978 * Handle a control request from the operator.
979 */
980 static int
981 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
982 {
983 struct bridge_softc *sc = ifp->if_softc;
984 struct ifreq *ifr = (struct ifreq *)data;
985 struct bridge_iflist *bif;
986 struct thread *td = curthread;
987 union {
988 struct ifbreq ifbreq;
989 struct ifbifconf ifbifconf;
990 struct ifbareq ifbareq;
991 struct ifbaconf ifbaconf;
992 struct ifbrparam ifbrparam;
993 struct ifbropreq ifbropreq;
994 struct ifbif_vlan_req ifvlanreq;
995 } args;
996 struct ifdrv *ifd = (struct ifdrv *) data;
997 const struct bridge_control *bc;
998 int error = 0, oldmtu;
999
1000 BRIDGE_LOCK(sc);
1001
1002 switch (cmd) {
1003 case SIOCADDMULTI:
1004 case SIOCDELMULTI:
1005 break;
1006
1007 case SIOCGDRVSPEC:
1008 case SIOCSDRVSPEC:
1009 if (ifd->ifd_cmd >= bridge_control_table_size) {
1010 error = EXTERROR(EINVAL, "Invalid control command");
1011 break;
1012 }
1013 bc = &bridge_control_table[ifd->ifd_cmd];
1014
1015 if (cmd == SIOCGDRVSPEC &&
1016 (bc->bc_flags & BC_F_COPYOUT) == 0) {
1017 error = EXTERROR(EINVAL,
1018 "Inappropriate ioctl for command "
1019 "(expected SIOCSDRVSPEC)");
1020 break;
1021 }
1022 else if (cmd == SIOCSDRVSPEC &&
1023 (bc->bc_flags & BC_F_COPYOUT) != 0) {
1024 error = EXTERROR(EINVAL,
1025 "Inappropriate ioctl for command "
1026 "(expected SIOCGDRVSPEC)");
1027 break;
1028 }
1029
1030 if (bc->bc_flags & BC_F_SUSER) {
1031 error = priv_check(td, PRIV_NET_BRIDGE);
1032 if (error) {
1033 EXTERROR(error, "PRIV_NET_BRIDGE required");
1034 break;
1035 }
1036 }
1037
1038 if (ifd->ifd_len != bc->bc_argsize ||
1039 ifd->ifd_len > sizeof(args)) {
1040 error = EXTERROR(EINVAL, "Invalid argument size");
1041 break;
1042 }
1043
1044 bzero(&args, sizeof(args));
1045 if (bc->bc_flags & BC_F_COPYIN) {
1046 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
1047 if (error)
1048 break;
1049 }
1050
1051 oldmtu = ifp->if_mtu;
1052 error = (*bc->bc_func)(sc, &args);
1053 if (error)
1054 break;
1055
1056 /*
1057 * Bridge MTU may change during addition of the first port.
1058 		 * If it did, notify the network layers of the change.
1059 */
1060 if (ifp->if_mtu != oldmtu)
1061 if_notifymtu(ifp);
1062
1063 if (bc->bc_flags & BC_F_COPYOUT)
1064 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
1065
1066 break;
1067
1068 case SIOCSIFFLAGS:
1069 if (!(ifp->if_flags & IFF_UP) &&
1070 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1071 /*
1072 * If interface is marked down and it is running,
1073 * then stop and disable it.
1074 */
1075 bridge_stop(ifp, 1);
1076 } else if ((ifp->if_flags & IFF_UP) &&
1077 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1078 /*
1079 * If interface is marked up and it is stopped, then
1080 * start it.
1081 */
1082 BRIDGE_UNLOCK(sc);
1083 (*ifp->if_init)(sc);
1084 BRIDGE_LOCK(sc);
1085 }
1086 break;
1087
1088 case SIOCSIFMTU:
1089 oldmtu = sc->sc_ifp->if_mtu;
1090
1091 if (ifr->ifr_mtu < IF_MINMTU) {
1092 error = EXTERROR(EINVAL,
1093 "Requested MTU is lower than IF_MINMTU");
1094 break;
1095 }
1096 if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1097 sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1098 break;
1099 }
1100 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1101 error = (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1102 SIOCSIFMTU, (caddr_t)ifr);
1103 if (error != 0) {
1104 log(LOG_NOTICE, "%s: invalid MTU: %u for"
1105 " member %s\n", sc->sc_ifp->if_xname,
1106 ifr->ifr_mtu,
1107 bif->bif_ifp->if_xname);
1108 error = EINVAL;
1109 break;
1110 }
1111 }
1112 if (error) {
1113 /* Restore the previous MTU on all member interfaces. */
1114 ifr->ifr_mtu = oldmtu;
1115 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1116 (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1117 SIOCSIFMTU, (caddr_t)ifr);
1118 }
1119 EXTERROR(error,
1120 "Failed to set MTU on member interface");
1121 } else {
1122 sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1123 }
1124 break;
1125 default:
1126 /*
1127 * drop the lock as ether_ioctl() will call bridge_start() and
1128 * cause the lock to be recursed.
1129 */
1130 BRIDGE_UNLOCK(sc);
1131 error = ether_ioctl(ifp, cmd, data);
1132 BRIDGE_LOCK(sc);
1133 break;
1134 }
1135
1136 BRIDGE_UNLOCK(sc);
1137
1138 return (error);
1139 }
1140
1141 /*
1142 * bridge_mutecaps:
1143 *
1144 * Clear or restore unwanted capabilities on the member interface
1145 */
1146 static void
1147 bridge_mutecaps(struct bridge_softc *sc)
1148 {
1149 struct bridge_iflist *bif;
1150 int enabled, mask;
1151
1152 BRIDGE_LOCK_ASSERT(sc);
1153
1154 /* Initial bitmask of capabilities to test */
1155 mask = BRIDGE_IFCAPS_MASK;
1156
1157 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1158 /* Every member must support it or it's disabled */
1159 mask &= bif->bif_savedcaps;
1160 }
1161
1162 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1163 enabled = bif->bif_ifp->if_capenable;
1164 enabled &= ~BRIDGE_IFCAPS_STRIP;
1165 /* Strip off mask bits and enable them again if allowed */
1166 enabled &= ~BRIDGE_IFCAPS_MASK;
1167 enabled |= mask;
1168 bridge_set_ifcap(sc, bif, enabled);
1169 }
1170 }
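/*
 * Worked example (illustrative): if member A advertises TXCSUM|TSO and
 * member B advertises only TXCSUM, the intersection leaves mask == TXCSUM,
 * so TSO is switched off on A while both members keep TXCSUM.  Capabilities
 * in BRIDGE_IFCAPS_STRIP (LRO) are always disabled on members.
 */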
1171
1172 static void
1173 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1174 {
1175 struct ifnet *ifp = bif->bif_ifp;
1176 struct ifreq ifr;
1177 int error, mask, stuck;
1178
1179 bzero(&ifr, sizeof(ifr));
1180 ifr.ifr_reqcap = set;
1181
1182 if (ifp->if_capenable != set) {
1183 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1184 if (error)
1185 if_printf(sc->sc_ifp,
1186 "error setting capabilities on %s: %d\n",
1187 ifp->if_xname, error);
1188 mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
1189 stuck = ifp->if_capenable & mask & ~set;
1190 if (stuck != 0)
1191 if_printf(sc->sc_ifp,
1192 "can't disable some capabilities on %s: 0x%x\n",
1193 ifp->if_xname, stuck);
1194 }
1195 }
1196
1197 /*
1198 * bridge_lookup_member:
1199 *
1200 * Lookup a bridge member interface.
1201 */
1202 static struct bridge_iflist *
1203 bridge_lookup_member(struct bridge_softc *sc, const char *name)
1204 {
1205 struct bridge_iflist *bif;
1206 struct ifnet *ifp;
1207
1208 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1209
1210 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1211 ifp = bif->bif_ifp;
1212 if (strcmp(ifp->if_xname, name) == 0)
1213 return (bif);
1214 }
1215
1216 return (NULL);
1217 }
1218
1219 /*
1220 * bridge_lookup_member_if:
1221 *
1222 * Lookup a bridge member interface by ifnet*.
1223 */
1224 static struct bridge_iflist *
1225 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1226 {
1227 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1228 return (member_ifp->if_bridge);
1229 }
1230
1231 static void
1232 bridge_delete_member_cb(struct epoch_context *ctx)
1233 {
1234 struct bridge_iflist *bif;
1235
1236 bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1237
1238 free(bif, M_DEVBUF);
1239 }
1240
1241 /*
1242 * bridge_delete_member:
1243 *
1244 * Delete the specified member interface.
1245 */
1246 static void
1247 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1248 int gone)
1249 {
1250 struct ifnet *ifs = bif->bif_ifp;
1251 struct ifnet *fif = NULL;
1252 struct bridge_iflist *bifl;
1253
1254 BRIDGE_LOCK_ASSERT(sc);
1255
1256 if (bif->bif_flags & IFBIF_STP)
1257 bstp_disable(&bif->bif_stp);
1258
1259 ifs->if_bridge = NULL;
1260 CK_LIST_REMOVE(bif, bif_next);
1261
1262 /*
1263 * If removing the interface that gave the bridge its mac address, set
1264 * the mac address of the bridge to the address of the next member, or
1265 * to its default address if no members are left.
1266 */
1267 if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1268 if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1269 bcopy(&sc->sc_defaddr,
1270 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1271 sc->sc_ifaddr = NULL;
1272 } else {
1273 bifl = CK_LIST_FIRST(&sc->sc_iflist);
1274 fif = bifl->bif_ifp;
1275 bcopy(IF_LLADDR(fif),
1276 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1277 sc->sc_ifaddr = fif;
1278 }
1279 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1280 }
1281
1282 bridge_linkcheck(sc);
1283 	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
1284 BRIDGE_RT_LOCK(sc);
1285 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1286 BRIDGE_RT_UNLOCK(sc);
1287 KASSERT(bif->bif_addrcnt == 0,
1288 ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1289
1290 ifs->if_bridge_output = NULL;
1291 ifs->if_bridge_input = NULL;
1292 ifs->if_bridge_linkstate = NULL;
1293 if (!gone) {
1294 switch (ifs->if_type) {
1295 case IFT_ETHER:
1296 case IFT_L2VLAN:
1297 /*
1298 * Take the interface out of promiscuous mode, but only
1299 * if it was promiscuous in the first place. It might
1300 * not be if we're in the bridge_ioctl_add() error path.
1301 */
1302 if (ifs->if_flags & IFF_PROMISC)
1303 (void) ifpromisc(ifs, 0);
1304 break;
1305
1306 case IFT_GIF:
1307 break;
1308
1309 default:
1310 #ifdef DIAGNOSTIC
1311 panic("bridge_delete_member: impossible");
1312 #endif
1313 break;
1314 }
1315 /* Re-enable any interface capabilities */
1316 bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1317 }
1318 bstp_destroy(&bif->bif_stp); /* prepare to free */
1319
1320 NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1321 }
1322
1323 /*
1324 * bridge_delete_span:
1325 *
1326 * Delete the specified span interface.
1327 */
1328 static void
1329 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1330 {
1331 BRIDGE_LOCK_ASSERT(sc);
1332
1333 KASSERT(bif->bif_ifp->if_bridge == NULL,
1334 ("%s: not a span interface", __func__));
1335
1336 CK_LIST_REMOVE(bif, bif_next);
1337
1338 NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1339 }
1340
1341 static int
1342 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1343 {
1344 struct ifbreq *req = arg;
1345 struct bridge_iflist *bif = NULL;
1346 struct ifnet *ifs;
1347 int error = 0;
1348
1349 ifs = ifunit(req->ifbr_ifsname);
1350 if (ifs == NULL)
1351 return (EXTERROR(ENOENT, "No such interface",
1352 req->ifbr_ifsname));
1353 if (ifs->if_ioctl == NULL) /* must be supported */
1354 return (EXTERROR(EINVAL, "Interface must support ioctl(2)"));
1355
1356 /*
1357 * If the new interface is a vlan(4), it could be a bridge SVI.
1358 * Don't allow such things to be added to bridges.
1359 */
1360 if (ifs->if_type == IFT_L2VLAN) {
1361 struct ifnet *parent;
1362 struct epoch_tracker et;
1363 bool is_bridge;
1364
1365 /*
1366 * Entering NET_EPOCH with BRIDGE_LOCK held, but this is okay
1367 * since we don't sleep here.
1368 */
1369 NET_EPOCH_ENTER(et);
1370 parent = VLAN_TRUNKDEV(ifs);
1371 is_bridge = (parent != NULL && parent->if_type == IFT_BRIDGE);
1372 NET_EPOCH_EXIT(et);
1373
1374 if (is_bridge)
1375 return (EXTERROR(EINVAL,
1376 "Bridge SVI cannot be added to a bridge"));
1377 }
1378
1379 /* If it's in the span list, it can't be a member. */
1380 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1381 if (ifs == bif->bif_ifp)
1382 return (EXTERROR(EBUSY,
1383 "Span interface cannot be a member"));
1384
1385 if (ifs->if_bridge) {
1386 struct bridge_iflist *sbif = ifs->if_bridge;
1387 if (sbif->bif_sc == sc)
1388 return (EXTERROR(EEXIST,
1389 "Interface is already a member of this bridge"));
1390
1391 return (EXTERROR(EBUSY,
1392 "Interface is already a member of another bridge"));
1393 }
1394
1395 switch (ifs->if_type) {
1396 case IFT_ETHER:
1397 case IFT_L2VLAN:
1398 case IFT_GIF:
1399 /* permitted interface types */
1400 break;
1401 default:
1402 return (EXTERROR(EINVAL, "Unsupported interface type"));
1403 }
1404
1405 #ifdef INET6
1406 /*
1407 * Two valid inet6 addresses with link-local scope must not be
1408 * on the parent interface and the member interfaces at the
1409 * same time. This restriction is needed to prevent violation
1410 	 * of the link-local scope zone.  Attempts to add a member
1411 	 * interface which has inet6 addresses when the parent has
1412 	 * inet6 addresses trigger removal of all inet6 addresses on the member
1413 * interface.
1414 */
1415
1416 /* Check if the parent interface has a link-local scope addr. */
1417 if (V_allow_llz_overlap == 0 &&
1418 in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1419 /*
1420 * If any, remove all inet6 addresses from the member
1421 * interfaces.
1422 */
1423 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1424 if (in6ifa_llaonifp(bif->bif_ifp)) {
1425 in6_ifdetach(bif->bif_ifp);
1426 if_printf(sc->sc_ifp,
1427 "IPv6 addresses on %s have been removed "
1428 "before adding it as a member to prevent "
1429 "IPv6 address scope violation.\n",
1430 bif->bif_ifp->if_xname);
1431 }
1432 }
1433 if (in6ifa_llaonifp(ifs)) {
1434 in6_ifdetach(ifs);
1435 if_printf(sc->sc_ifp,
1436 "IPv6 addresses on %s have been removed "
1437 "before adding it as a member to prevent "
1438 "IPv6 address scope violation.\n",
1439 ifs->if_xname);
1440 }
1441 }
1442 #endif
1443
1444 /*
1445 * If member_ifaddrs is disabled, do not allow an interface with
1446 * assigned IP addresses to be added to a bridge.
1447 */
1448 if (!V_member_ifaddrs) {
1449 struct ifaddr *ifa;
1450
1451 CK_STAILQ_FOREACH(ifa, &ifs->if_addrhead, ifa_link) {
1452 #ifdef INET
1453 if (ifa->ifa_addr->sa_family == AF_INET)
1454 return (EXTERROR(EINVAL,
1455 "Member interface may not have "
1456 "an IPv4 address configured"));
1457 #endif
1458 #ifdef INET6
1459 if (ifa->ifa_addr->sa_family == AF_INET6)
1460 return (EXTERROR(EINVAL,
1461 "Member interface may not have "
1462 "an IPv6 address configured"));
1463 #endif
1464 }
1465 }
1466
1467 /* Allow the first Ethernet member to define the MTU */
1468 if (CK_LIST_EMPTY(&sc->sc_iflist))
1469 sc->sc_ifp->if_mtu = ifs->if_mtu;
1470 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1471 struct ifreq ifr;
1472
1473 snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
1474 ifs->if_xname);
1475 ifr.ifr_mtu = sc->sc_ifp->if_mtu;
1476
1477 error = (*ifs->if_ioctl)(ifs,
1478 SIOCSIFMTU, (caddr_t)&ifr);
1479 if (error != 0) {
1480 log(LOG_NOTICE, "%s: invalid MTU: %u for"
1481 " new member %s\n", sc->sc_ifp->if_xname,
1482 ifr.ifr_mtu,
1483 ifs->if_xname);
1484 return (EXTERROR(EINVAL,
1485 "Failed to set MTU on new member"));
1486 }
1487 }
1488
1489 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1490 if (bif == NULL)
1491 return (ENOMEM);
1492
1493 bif->bif_sc = sc;
1494 bif->bif_ifp = ifs;
1495 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1496 bif->bif_savedcaps = ifs->if_capenable;
1497 if (sc->sc_flags & IFBRF_VLANFILTER)
1498 bif->bif_pvid = sc->sc_defpvid;
1499
1500 /*
1501 * Assign the interface's MAC address to the bridge if it's the first
1502 * member and the MAC address of the bridge has not been changed from
1503 * the default randomly generated one.
1504 */
1505 if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1506 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1507 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1508 sc->sc_ifaddr = ifs;
1509 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1510 }
1511
1512 ifs->if_bridge = bif;
1513 ifs->if_bridge_output = bridge_output;
1514 ifs->if_bridge_input = bridge_input;
1515 ifs->if_bridge_linkstate = bridge_linkstate;
1516 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1517 /*
1518 * XXX: XLOCK HERE!?!
1519 *
1520 * NOTE: insert_***HEAD*** should be safe for the traversals.
1521 */
1522 CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1523
1524 /* Set interface capabilities to the intersection set of all members */
1525 bridge_mutecaps(sc);
1526 bridge_linkcheck(sc);
1527
1528 /* Place the interface into promiscuous mode */
1529 switch (ifs->if_type) {
1530 case IFT_ETHER:
1531 case IFT_L2VLAN:
1532 error = ifpromisc(ifs, 1);
1533 break;
1534 }
1535
1536 if (error)
1537 bridge_delete_member(sc, bif, 0);
1538 return (error);
1539 }
1540
1541 static int
1542 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1543 {
1544 struct ifbreq *req = arg;
1545 struct bridge_iflist *bif;
1546
1547 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1548 if (bif == NULL)
1549 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1550
1551 bridge_delete_member(sc, bif, 0);
1552
1553 return (0);
1554 }
1555
1556 static int
1557 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1558 {
1559 struct ifbreq *req = arg;
1560 struct bridge_iflist *bif;
1561 struct bstp_port *bp;
1562
1563 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1564 if (bif == NULL)
1565 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1566
1567 bp = &bif->bif_stp;
1568 req->ifbr_ifsflags = bif->bif_flags;
1569 req->ifbr_state = bp->bp_state;
1570 req->ifbr_priority = bp->bp_priority;
1571 req->ifbr_path_cost = bp->bp_path_cost;
1572 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1573 req->ifbr_proto = bp->bp_protover;
1574 req->ifbr_role = bp->bp_role;
1575 req->ifbr_stpflags = bp->bp_flags;
1576 req->ifbr_addrcnt = bif->bif_addrcnt;
1577 req->ifbr_addrmax = bif->bif_addrmax;
1578 req->ifbr_addrexceeded = bif->bif_addrexceeded;
1579 req->ifbr_pvid = bif->bif_pvid;
1580
1581 /* Copy STP state options as flags */
1582 if (bp->bp_operedge)
1583 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1584 if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1585 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1586 if (bp->bp_ptp_link)
1587 req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1588 if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1589 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1590 if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1591 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1592 if (bp->bp_flags & BSTP_PORT_ADMCOST)
1593 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1594 return (0);
1595 }
1596
1597 static int
1598 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1599 {
1600 struct epoch_tracker et;
1601 struct ifbreq *req = arg;
1602 struct bridge_iflist *bif;
1603 struct bstp_port *bp;
1604 int error;
1605
1606 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1607 if (bif == NULL)
1608 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1609 bp = &bif->bif_stp;
1610
1611 if (req->ifbr_ifsflags & IFBIF_SPAN)
1612 /* SPAN is readonly */
1613 return (EXTERROR(EINVAL, "Span interface cannot be modified"));
1614
1615 NET_EPOCH_ENTER(et);
1616
1617 if (req->ifbr_ifsflags & IFBIF_STP) {
1618 if ((bif->bif_flags & IFBIF_STP) == 0) {
1619 error = bstp_enable(&bif->bif_stp);
1620 if (error) {
1621 NET_EPOCH_EXIT(et);
1622 return (EXTERROR(error,
1623 "Failed to enable STP"));
1624 }
1625 }
1626 } else {
1627 if ((bif->bif_flags & IFBIF_STP) != 0)
1628 bstp_disable(&bif->bif_stp);
1629 }
1630
1631 /* Pass on STP flags */
1632 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1633 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1634 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1635 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1636
1637 /* Save the bits relating to the bridge */
1638 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1639
1640 NET_EPOCH_EXIT(et);
1641
1642 return (0);
1643 }
1644
1645 static int
1646 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1647 {
1648 struct ifbrparam *param = arg;
1649
1650 sc->sc_brtmax = param->ifbrp_csize;
1651 bridge_rttrim(sc);
1652
1653 return (0);
1654 }
1655
1656 static int
1657 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1658 {
1659 struct ifbrparam *param = arg;
1660
1661 param->ifbrp_csize = sc->sc_brtmax;
1662
1663 return (0);
1664 }
1665
1666 static int
1667 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1668 {
1669 struct ifbifconf *bifc = arg;
1670 struct bridge_iflist *bif;
1671 struct ifbreq breq;
1672 char *buf, *outbuf;
1673 int count, buflen, len, error = 0;
1674
1675 count = 0;
1676 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1677 count++;
1678 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1679 count++;
1680
1681 buflen = sizeof(breq) * count;
1682 if (bifc->ifbic_len == 0) {
1683 bifc->ifbic_len = buflen;
1684 return (0);
1685 }
1686 outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1687 if (outbuf == NULL)
1688 return (ENOMEM);
1689
1690 count = 0;
1691 buf = outbuf;
1692 len = min(bifc->ifbic_len, buflen);
1693 bzero(&breq, sizeof(breq));
1694 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1695 if (len < sizeof(breq))
1696 break;
1697
1698 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1699 sizeof(breq.ifbr_ifsname));
1700 /* Fill in the ifbreq structure */
1701 error = bridge_ioctl_gifflags(sc, &breq);
1702 if (error)
1703 break;
1704 memcpy(buf, &breq, sizeof(breq));
1705 count++;
1706 buf += sizeof(breq);
1707 len -= sizeof(breq);
1708 }
1709 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1710 if (len < sizeof(breq))
1711 break;
1712
1713 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1714 sizeof(breq.ifbr_ifsname));
1715 breq.ifbr_ifsflags = bif->bif_flags;
1716 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1717 memcpy(buf, &breq, sizeof(breq));
1718 count++;
1719 buf += sizeof(breq);
1720 len -= sizeof(breq);
1721 }
1722
1723 bifc->ifbic_len = sizeof(breq) * count;
1724 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1725 free(outbuf, M_TEMP);
1726 return (error);
1727 }
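/*
 * Usage sketch (illustrative): like the other "conf"-style requests, callers
 * typically issue this twice: once with ifbic_len == 0 to learn the required
 * buffer size, then again with a buffer of that size.
 *
 *	bifc.ifbic_len = 0;
 *	ioctl(s, SIOCGDRVSPEC, &ifd);		// kernel reports size needed
 *	bifc.ifbic_req = malloc(bifc.ifbic_len);
 *	ioctl(s, SIOCGDRVSPEC, &ifd);		// kernel fills the buffer
 */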
1728
1729 static int
1730 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1731 {
1732 struct ifbaconf *bac = arg;
1733 struct bridge_rtnode *brt;
1734 struct ifbareq bareq;
1735 char *buf, *outbuf;
1736 int count, buflen, len, error = 0;
1737
1738 if (bac->ifbac_len == 0)
1739 return (0);
1740
1741 count = 0;
1742 CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1743 count++;
1744 buflen = sizeof(bareq) * count;
1745
1746 outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1747 if (outbuf == NULL)
1748 return (ENOMEM);
1749
1750 count = 0;
1751 buf = outbuf;
1752 len = min(bac->ifbac_len, buflen);
1753 bzero(&bareq, sizeof(bareq));
1754 CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1755 if (len < sizeof(bareq))
1756 goto out;
1757 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1758 sizeof(bareq.ifba_ifsname));
1759 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1760 bareq.ifba_vlan = brt->brt_vlan;
1761 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1762 time_uptime < brt->brt_expire)
1763 bareq.ifba_expire = brt->brt_expire - time_uptime;
1764 else
1765 bareq.ifba_expire = 0;
1766 bareq.ifba_flags = brt->brt_flags;
1767
1768 memcpy(buf, &bareq, sizeof(bareq));
1769 count++;
1770 buf += sizeof(bareq);
1771 len -= sizeof(bareq);
1772 }
1773 out:
1774 bac->ifbac_len = sizeof(bareq) * count;
1775 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1776 free(outbuf, M_TEMP);
1777 return (error);
1778 }
1779
1780 static int
1781 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1782 {
1783 struct ifbareq *req = arg;
1784 struct bridge_iflist *bif;
1785 struct epoch_tracker et;
1786 int error;
1787
1788 NET_EPOCH_ENTER(et);
1789 bif = bridge_lookup_member(sc, req->ifba_ifsname);
1790 if (bif == NULL) {
1791 NET_EPOCH_EXIT(et);
1792 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1793 }
1794
1795 /* bridge_rtupdate() may acquire the lock. */
1796 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1797 req->ifba_flags);
1798 NET_EPOCH_EXIT(et);
1799
1800 return (error);
1801 }
1802
1803 static int
1804 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1805 {
1806 struct ifbrparam *param = arg;
1807
1808 sc->sc_brttimeout = param->ifbrp_ctime;
1809 return (0);
1810 }
1811
1812 static int
1813 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1814 {
1815 struct ifbrparam *param = arg;
1816
1817 param->ifbrp_ctime = sc->sc_brttimeout;
1818 return (0);
1819 }
1820
1821 static int
1822 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1823 {
1824 struct ifbareq *req = arg;
1825 int vlan = req->ifba_vlan;
1826
1827 /* Userspace uses '0' to mean 'any vlan' */
1828 if (vlan == 0)
1829 vlan = DOT1Q_VID_RSVD_IMPL;
1830
1831 return (bridge_rtdaddr(sc, req->ifba_dst, vlan));
1832 }
1833
1834 static int
1835 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1836 {
1837 struct ifbreq *req = arg;
1838
1839 BRIDGE_RT_LOCK(sc);
1840 bridge_rtflush(sc, req->ifbr_ifsflags);
1841 BRIDGE_RT_UNLOCK(sc);
1842
1843 return (0);
1844 }
1845
1846 static int
1847 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1848 {
1849 struct ifbrparam *param = arg;
1850 struct bstp_state *bs = &sc->sc_stp;
1851
1852 param->ifbrp_prio = bs->bs_bridge_priority;
1853 return (0);
1854 }
1855
1856 static int
1857 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1858 {
1859 struct ifbrparam *param = arg;
1860
1861 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1862 }
1863
1864 static int
1865 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1866 {
1867 struct ifbrparam *param = arg;
1868 struct bstp_state *bs = &sc->sc_stp;
1869
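/*
 * The STP code appears to keep timer values scaled by 256 (units of
 * 1/256 s); shift down to report whole seconds to userland.
 */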
1870 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1871 return (0);
1872 }
1873
1874 static int
1875 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1876 {
1877 struct ifbrparam *param = arg;
1878
1879 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1880 }
1881
1882 static int
1883 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1884 {
1885 struct ifbrparam *param = arg;
1886 struct bstp_state *bs = &sc->sc_stp;
1887
1888 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1889 return (0);
1890 }
1891
1892 static int
1893 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1894 {
1895 struct ifbrparam *param = arg;
1896
1897 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1898 }
1899
1900 static int
1901 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1902 {
1903 struct ifbrparam *param = arg;
1904 struct bstp_state *bs = &sc->sc_stp;
1905
1906 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1907 return (0);
1908 }
1909
1910 static int
1911 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1912 {
1913 struct ifbrparam *param = arg;
1914
1915 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1916 }
1917
1918 static int
1919 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1920 {
1921 struct ifbreq *req = arg;
1922 struct bridge_iflist *bif;
1923
1924 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1925 if (bif == NULL)
1926 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1927
1928 return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1929 }
1930
1931 static int
1932 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1933 {
1934 struct ifbreq *req = arg;
1935 struct bridge_iflist *bif;
1936
1937 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1938 if (bif == NULL)
1939 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1940
1941 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1942 }
1943
1944 static int
1945 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1946 {
1947 struct ifbreq *req = arg;
1948 struct bridge_iflist *bif;
1949
1950 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1951 if (bif == NULL)
1952 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1953
1954 bif->bif_addrmax = req->ifbr_addrmax;
1955 return (0);
1956 }
1957
1958 static int
1959 bridge_ioctl_sifpvid(struct bridge_softc *sc, void *arg)
1960 {
1961 struct ifbreq *req = arg;
1962 struct bridge_iflist *bif;
1963
1964 if ((sc->sc_flags & IFBRF_VLANFILTER) == 0)
1965 return (EXTERROR(EINVAL, "VLAN filtering not enabled"));
1966
1967 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1968 if (bif == NULL)
1969 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1970
1971 if (req->ifbr_pvid > DOT1Q_VID_MAX)
1972 return (EXTERROR(EINVAL, "Invalid VLAN ID"));
1973
1974 bif->bif_pvid = req->ifbr_pvid;
1975 return (0);
1976 }
1977
1978 static int
1979 bridge_ioctl_sifvlanset(struct bridge_softc *sc, void *arg)
1980 {
1981 struct ifbif_vlan_req *req = arg;
1982 struct bridge_iflist *bif;
1983
1984 if ((sc->sc_flags & IFBRF_VLANFILTER) == 0)
1985 return (EXTERROR(EINVAL, "VLAN filtering not enabled"));
1986
1987 bif = bridge_lookup_member(sc, req->bv_ifname);
1988 if (bif == NULL)
1989 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1990
1991 /* Reject invalid VIDs. */
1992 if (BRVLAN_TEST(&req->bv_set, DOT1Q_VID_NULL) ||
1993 BRVLAN_TEST(&req->bv_set, DOT1Q_VID_RSVD_IMPL))
1994 return (EXTERROR(EINVAL, "Invalid VLAN ID in set"));
1995
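/*
 * Illustrative example, assuming the member's current access list is
 * {30}: SET with {10,20} leaves {10,20}; ADD with {10,20} leaves
 * {10,20,30}; DEL with {30} leaves the empty set.
 */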
1996 switch (req->bv_op) {
1997 /* Replace the existing vlan set with the new set */
1998 case BRDG_VLAN_OP_SET:
1999 BIT_COPY(BRVLAN_SETSIZE, &req->bv_set, &bif->bif_vlan_set);
2000 break;
2001
2002 /* Modify the existing vlan set to add the given vlans */
2003 case BRDG_VLAN_OP_ADD:
2004 BIT_OR(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2005 break;
2006
2007 /* Modify the existing vlan set to remove the given vlans */
2008 case BRDG_VLAN_OP_DEL:
2009 BIT_ANDNOT(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2010 break;
2011
2012 /* Invalid or unknown operation */
2013 default:
2014 return (EXTERROR(EINVAL,
2015 "Unsupported BRDGSIFVLANSET operation"));
2016 }
2017
2018 return (0);
2019 }
2020
2021 static int
2022 bridge_ioctl_gifvlanset(struct bridge_softc *sc, void *arg)
2023 {
2024 struct ifbif_vlan_req *req = arg;
2025 struct bridge_iflist *bif;
2026
2027 bif = bridge_lookup_member(sc, req->bv_ifname);
2028 if (bif == NULL)
2029 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
2030
2031 BIT_COPY(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2032 return (0);
2033 }
2034
2035 static int
2036 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
2037 {
2038 struct ifbreq *req = arg;
2039 struct bridge_iflist *bif = NULL;
2040 struct ifnet *ifs;
2041
2042 ifs = ifunit(req->ifbr_ifsname);
2043 if (ifs == NULL)
2044 return (EXTERROR(ENOENT, "No such interface"));
2045
2046 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2047 if (ifs == bif->bif_ifp)
2048 return (EXTERROR(EBUSY,
2049 "Interface is already a span port"));
2050
2051 if (ifs->if_bridge != NULL)
2052 return (EXTERROR(EEXIST,
2053 "Interface is already a bridge member"));
2054
2055 switch (ifs->if_type) {
2056 case IFT_ETHER:
2057 case IFT_GIF:
2058 case IFT_L2VLAN:
2059 break;
2060 default:
2061 return (EXTERROR(EINVAL, "Unsupported interface type"));
2062 }
2063
2064 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
2065 if (bif == NULL)
2066 return (ENOMEM);
2067
2068 bif->bif_ifp = ifs;
2069 bif->bif_flags = IFBIF_SPAN;
2070
2071 CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
2072
2073 return (0);
2074 }
2075
2076 static int
2077 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
2078 {
2079 struct ifbreq *req = arg;
2080 struct bridge_iflist *bif;
2081 struct ifnet *ifs;
2082
2083 ifs = ifunit(req->ifbr_ifsname);
2084 if (ifs == NULL)
2085 return (EXTERROR(ENOENT, "No such interface"));
2086
2087 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2088 if (ifs == bif->bif_ifp)
2089 break;
2090
2091 if (bif == NULL)
2092 return (EXTERROR(ENOENT, "Interface is not a span port"));
2093
2094 bridge_delete_span(sc, bif);
2095
2096 return (0);
2097 }
2098
2099 static int
2100 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
2101 {
2102 struct ifbropreq *req = arg;
2103 struct bstp_state *bs = &sc->sc_stp;
2104 struct bstp_port *root_port;
2105
2106 req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
2107 req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
2108 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
2109
2110 root_port = bs->bs_root_port;
2111 if (root_port == NULL)
2112 req->ifbop_root_port = 0;
2113 else
2114 req->ifbop_root_port = root_port->bp_ifp->if_index;
2115
2116 req->ifbop_holdcount = bs->bs_txholdcount;
2117 req->ifbop_priority = bs->bs_bridge_priority;
2118 req->ifbop_protocol = bs->bs_protover;
2119 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
2120 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
2121 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
2122 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
2123 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
2124 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
2125
2126 return (0);
2127 }
2128
2129 static int
2130 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
2131 {
2132 struct ifbrparam *param = arg;
2133
2134 param->ifbrp_cexceeded = sc->sc_brtexceeded;
2135 return (0);
2136 }
2137
2138 static int
2139 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
2140 {
2141 struct ifbpstpconf *bifstp = arg;
2142 struct bridge_iflist *bif;
2143 struct bstp_port *bp;
2144 struct ifbpstpreq bpreq;
2145 char *buf, *outbuf;
2146 int count, buflen, len, error = 0;
2147
2148 count = 0;
2149 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2150 if ((bif->bif_flags & IFBIF_STP) != 0)
2151 count++;
2152 }
2153
2154 buflen = sizeof(bpreq) * count;
2155 if (bifstp->ifbpstp_len == 0) {
2156 bifstp->ifbpstp_len = buflen;
2157 return (0);
2158 }
2159
2160 outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
2161 if (outbuf == NULL)
2162 return (ENOMEM);
2163
2164 count = 0;
2165 buf = outbuf;
2166 len = min(bifstp->ifbpstp_len, buflen);
2167 bzero(&bpreq, sizeof(bpreq));
2168 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2169 if (len < sizeof(bpreq))
2170 break;
2171
2172 if ((bif->bif_flags & IFBIF_STP) == 0)
2173 continue;
2174
2175 bp = &bif->bif_stp;
2176 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
2177 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
2178 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
2179 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
2180 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
2181 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
2182
2183 memcpy(buf, &bpreq, sizeof(bpreq));
2184 count++;
2185 buf += sizeof(bpreq);
2186 len -= sizeof(bpreq);
2187 }
2188
2189 bifstp->ifbpstp_len = sizeof(bpreq) * count;
2190 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
2191 free(outbuf, M_TEMP);
2192 return (error);
2193 }
2194
2195 static int
2196 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
2197 {
2198 struct ifbrparam *param = arg;
2199
2200 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
2201 }
2202
2203 static int
2204 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
2205 {
2206 struct ifbrparam *param = arg;
2207
2208 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
2209 }
2210
2211 static int
2212 bridge_ioctl_gflags(struct bridge_softc *sc, void *arg)
2213 {
2214 struct ifbrparam *param = arg;
2215
2216 param->ifbrp_flags = sc->sc_flags;
2217
2218 return (0);
2219 }
2220
2221 static int
2222 bridge_ioctl_sflags(struct bridge_softc *sc, void *arg)
2223 {
2224 struct ifbrparam *param = arg;
2225
2226 sc->sc_flags = param->ifbrp_flags;
2227
2228 return (0);
2229 }
2230
2231 static int
2232 bridge_ioctl_gdefpvid(struct bridge_softc *sc, void *arg)
2233 {
2234 struct ifbrparam *param = arg;
2235
2236 param->ifbrp_defpvid = sc->sc_defpvid;
2237
2238 return (0);
2239 }
2240
2241 static int
2242 bridge_ioctl_sdefpvid(struct bridge_softc *sc, void *arg)
2243 {
2244 struct ifbrparam *param = arg;
2245
2246 /* Reject invalid VIDs, but allow 0 to mean 'none'. */
2247 if (param->ifbrp_defpvid > DOT1Q_VID_MAX)
2248 return (EINVAL);
2249
2250 sc->sc_defpvid = param->ifbrp_defpvid;
2251
2252 return (0);
2253 }
2254
2255 /*
2256 * bridge_ifdetach:
2257 *
2258 * Detach an interface from a bridge. Called when a member
2259 * interface is detaching.
2260 */
2261 static void
2262 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
2263 {
2264 struct bridge_iflist *bif = ifp->if_bridge;
2265 struct bridge_softc *sc = NULL;
2266
2267 if (bif)
2268 sc = bif->bif_sc;
2269
2270 if (ifp->if_flags & IFF_RENAMING)
2271 return;
2272 if (V_bridge_cloner == NULL) {
2273 /*
2274 * This detach handler can be called after
2275 * vnet_bridge_uninit(). Just return in that case.
2276 */
2277 return;
2278 }
2279 /* Check if the interface is a bridge member */
2280 if (sc != NULL) {
2281 BRIDGE_LOCK(sc);
2282 bridge_delete_member(sc, bif, 1);
2283 BRIDGE_UNLOCK(sc);
2284 return;
2285 }
2286
2287 /* Check if the interface is a span port */
2288 BRIDGE_LIST_LOCK();
2289 LIST_FOREACH(sc, &V_bridge_list, sc_list) {
2290 BRIDGE_LOCK(sc);
2291 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2292 if (ifp == bif->bif_ifp) {
2293 bridge_delete_span(sc, bif);
2294 break;
2295 }
2296
2297 BRIDGE_UNLOCK(sc);
2298 }
2299 BRIDGE_LIST_UNLOCK();
2300 }
2301
2302 /*
2303 * bridge_init:
2304 *
2305 * Initialize a bridge interface.
2306 */
2307 static void
2308 bridge_init(void *xsc)
2309 {
2310 struct bridge_softc *sc = (struct bridge_softc *)xsc;
2311 struct ifnet *ifp = sc->sc_ifp;
2312
2313 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2314 return;
2315
2316 BRIDGE_LOCK(sc);
2317 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
2318 bridge_timer, sc);
2319
2320 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2321 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
2322
2323 BRIDGE_UNLOCK(sc);
2324 }
2325
2326 /*
2327 * bridge_stop:
2328 *
2329 * Stop the bridge interface.
2330 */
2331 static void
2332 bridge_stop(struct ifnet *ifp, int disable)
2333 {
2334 struct bridge_softc *sc = ifp->if_softc;
2335
2336 BRIDGE_LOCK_ASSERT(sc);
2337
2338 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2339 return;
2340
2341 BRIDGE_RT_LOCK(sc);
2342 callout_stop(&sc->sc_brcallout);
2343
2344 bstp_stop(&sc->sc_stp);
2345
2346 bridge_rtflush(sc, IFBF_FLUSHDYN);
2347 BRIDGE_RT_UNLOCK(sc);
2348
2349 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2350 }
2351
2352 /*
2353 * bridge_enqueue:
2354 *
2355 * Enqueue a packet on a bridge member interface.
2356 *
2357 */
2358 static int
2359 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
2360 struct bridge_iflist *bif)
2361 {
2362 int len, err = 0;
2363 short mflags;
2364 struct mbuf *m0;
2365
2366 /*
2367 * Find the bridge member port this packet is being sent on, if the
2368 * caller didn't already provide it.
2369 */
2370 if (bif == NULL)
2371 bif = bridge_lookup_member_if(sc, dst_ifp);
2372 if (bif == NULL) {
2373 /* Perhaps the interface was removed from the bridge */
2374 m_freem(m);
2375 return (EINVAL);
2376 }
2377
2378 /* We may be sending a fragment so traverse the mbuf */
2379 for (; m; m = m0) {
2380 m0 = m->m_nextpkt;
2381 m->m_nextpkt = NULL;
2382 len = m->m_pkthdr.len;
2383 mflags = m->m_flags;
2384
2385 /*
2386 * If VLAN filtering is enabled, and the native VLAN ID of the
2387 * outgoing interface matches the VLAN ID of the frame, remove
2388 * the VLAN header.
2389 */
2390 if ((sc->sc_flags & IFBRF_VLANFILTER) &&
2391 bif->bif_pvid != DOT1Q_VID_NULL &&
2392 VLANTAGOF(m) == bif->bif_pvid) {
2393 m->m_flags &= ~M_VLANTAG;
2394 m->m_pkthdr.ether_vtag = 0;
2395 }
2396
2397 /*
2398 * If the underlying interface cannot do VLAN tag insertion itself
2399 * then attach a packet tag that holds it.
2400 */
2401 if ((m->m_flags & M_VLANTAG) &&
2402 (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
2403 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
2404 if (m == NULL) {
2405 if_printf(dst_ifp,
2406 "unable to prepend VLAN header\n");
2407 if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
2408 continue;
2409 }
2410 m->m_flags &= ~M_VLANTAG;
2411 }
2412
2413 M_ASSERTPKTHDR(m); /* We shouldn't transmit an mbuf without a pkthdr */
2414 if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
2415 int n;
2416
2417 for (m = m0, n = 1; m != NULL; m = m0, n++) {
2418 m0 = m->m_nextpkt;
2419 m_freem(m);
2420 }
2421 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, n);
2422 break;
2423 }
2424
2425 if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
2426 if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
2427 if (mflags & M_MCAST)
2428 if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
2429 }
2430
2431 return (err);
2432 }
2433
2434 /*
2435 * bridge_dummynet:
2436 *
2437 * Receive a queued packet from dummynet and pass it on to the output
2438 * interface.
2439 *
2440 * The mbuf has the Ethernet header already attached.
2441 */
2442 static void
2443 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2444 {
2445 struct bridge_iflist *bif = ifp->if_bridge;
2446 struct bridge_softc *sc = NULL;
2447
2448 if (bif)
2449 sc = bif->bif_sc;
2450
2451 /*
2452 * The packet didn't originate from a member interface. This should only
2453 * ever happen if a member interface is removed while packets are
2454 * queued for it.
2455 */
2456 if (sc == NULL) {
2457 m_freem(m);
2458 return;
2459 }
2460
2461 if (PFIL_HOOKED_OUT_46) {
2462 if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2463 return;
2464 if (m == NULL)
2465 return;
2466 }
2467
2468 bridge_enqueue(sc, ifp, m, NULL);
2469 }
2470
2471 /*
2472 * bridge_output:
2473 *
2474 * Send output from a bridge member interface. This
2475 * performs the bridging function for locally originated
2476 * packets.
2477 *
2478 * The mbuf has the Ethernet header already attached. We must
2479 * enqueue or free the mbuf before returning.
2480 */
2481 static int
2482 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2483 struct rtentry *rt)
2484 {
2485 struct ether_header *eh;
2486 struct bridge_iflist *sbif;
2487 struct ifnet *bifp, *dst_if;
2488 struct bridge_softc *sc;
2489 ether_vlanid_t vlan;
2490
2491 NET_EPOCH_ASSERT();
2492
2493 if (m->m_len < ETHER_HDR_LEN) {
2494 m = m_pullup(m, ETHER_HDR_LEN);
2495 if (m == NULL)
2496 return (0);
2497 }
2498
2499 sbif = ifp->if_bridge;
2500 sc = sbif->bif_sc;
2501 bifp = sc->sc_ifp;
2502
2503 eh = mtod(m, struct ether_header *);
2504 vlan = VLANTAGOF(m);
2505
2506 /*
2507 * If bridge is down, but the original output interface is up,
2508 * go ahead and send out that interface. Otherwise, the packet
2509 * is dropped below.
2510 */
2511 if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2512 dst_if = ifp;
2513 goto sendunicast;
2514 }
2515
2516 /*
2517 * If the packet is a multicast, or we don't know a better way to
2518 * get there, send to all interfaces.
2519 */
2520 if (ETHER_IS_MULTICAST(eh->ether_dhost))
2521 dst_if = NULL;
2522 else
2523 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2524 /* Tap any traffic not passing back out the originating interface */
2525 if (dst_if != ifp)
2526 ETHER_BPF_MTAP(bifp, m);
2527 if (dst_if == NULL) {
2528 struct bridge_iflist *bif;
2529 struct mbuf *mc;
2530 int used = 0;
2531
2532 bridge_span(sc, m);
2533
2534 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2535 dst_if = bif->bif_ifp;
2536
2537 if (dst_if->if_type == IFT_GIF)
2538 continue;
2539 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2540 continue;
2541
2542 /*
2543 * If this is not the original output interface,
2544 * and the interface is participating in spanning
2545 * tree, make sure the port is in a state that
2546 * allows forwarding.
2547 */
2548 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2549 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2550 continue;
2551
2552 if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2553 used = 1;
2554 mc = m;
2555 } else {
2556 mc = m_dup(m, M_NOWAIT);
2557 if (mc == NULL) {
2558 if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2559 continue;
2560 }
2561 }
2562
2563 bridge_enqueue(sc, dst_if, mc, bif);
2564 }
2565 if (used == 0)
2566 m_freem(m);
2567 return (0);
2568 }
2569
2570 sendunicast:
2571 /*
2572 * XXX Spanning tree consideration here?
2573 */
2574
2575 bridge_span(sc, m);
2576 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2577 m_freem(m);
2578 return (0);
2579 }
2580
2581 bridge_enqueue(sc, dst_if, m, NULL);
2582 return (0);
2583 }
2584
2585 /*
2586 * bridge_transmit:
2587 *
2588 * Do output on a bridge.
2589 *
2590 */
2591 static int
2592 bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2593 {
2594 struct bridge_softc *sc;
2595 struct ether_header *eh;
2596 struct ifnet *dst_if;
2597 int error = 0;
2598 ether_vlanid_t vlan;
2599
2600 sc = ifp->if_softc;
2601
2602 ETHER_BPF_MTAP(ifp, m);
2603
2604 eh = mtod(m, struct ether_header *);
2605 vlan = VLANTAGOF(m);
2606
2607 if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2608 (dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan)) != NULL) {
2609 error = bridge_enqueue(sc, dst_if, m, NULL);
2610 } else
2611 bridge_broadcast(sc, ifp, m, 0);
2612
2613 return (error);
2614 }
2615
2616 #ifdef ALTQ
2617 static void
2618 bridge_altq_start(if_t ifp)
2619 {
2620 struct ifaltq *ifq = &ifp->if_snd;
2621 struct mbuf *m;
2622
2623 IFQ_LOCK(ifq);
2624 IFQ_DEQUEUE_NOLOCK(ifq, m);
2625 while (m != NULL) {
2626 bridge_transmit(ifp, m);
2627 IFQ_DEQUEUE_NOLOCK(ifq, m);
2628 }
2629 IFQ_UNLOCK(ifq);
2630 }
2631
2632 static int
2633 bridge_altq_transmit(if_t ifp, struct mbuf *m)
2634 {
2635 int err;
2636
2637 if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
2638 IFQ_ENQUEUE(&ifp->if_snd, m, err);
2639 if (err == 0)
2640 bridge_altq_start(ifp);
2641 } else
2642 err = bridge_transmit(ifp, m);
2643
2644 return (err);
2645 }
2646 #endif /* ALTQ */
2647
2648 /*
2649 * The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2650 */
2651 static void
2652 bridge_qflush(struct ifnet *ifp __unused)
2653 {
2654 }
2655
2656 /*
2657 * bridge_forward:
2658 *
2659 * The forwarding function of the bridge.
2660 *
2661 * NOTE: Releases the lock on return.
2662 */
2663 static void
2664 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2665 struct mbuf *m)
2666 {
2667 struct bridge_iflist *dbif;
2668 struct ifnet *src_if, *dst_if, *ifp;
2669 struct ether_header *eh;
2670 uint8_t *dst;
2671 int error;
2672 ether_vlanid_t vlan;
2673
2674 NET_EPOCH_ASSERT();
2675
2676 src_if = m->m_pkthdr.rcvif;
2677 ifp = sc->sc_ifp;
2678 vlan = VLANTAGOF(m);
2679
2680 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2681 if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2682
2683 if ((sbif->bif_flags & IFBIF_STP) &&
2684 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2685 goto drop;
2686
2687 eh = mtod(m, struct ether_header *);
2688 dst = eh->ether_dhost;
2689
2690 /* If the interface is learning, record the address. */
2691 if (sbif->bif_flags & IFBIF_LEARNING) {
2692 error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2693 sbif, 0, IFBAF_DYNAMIC);
2694 /*
2695 * If the interface has an address limit then deny any source
2696 * that is not in the cache.
2697 */
2698 if (error && sbif->bif_addrmax)
2699 goto drop;
2700 }
2701
2702 if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2703 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2704 goto drop;
2705
2706 #ifdef DEV_NETMAP
2707 /*
2708 * Hand the packet to netmap only if it wasn't injected by netmap
2709 * itself.
2710 */
2711 if ((m->m_flags & M_BRIDGE_INJECT) == 0 &&
2712 (if_getcapenable(ifp) & IFCAP_NETMAP) != 0) {
2713 ifp->if_input(ifp, m);
2714 return;
2715 }
2716 m->m_flags &= ~M_BRIDGE_INJECT;
2717 #endif
2718
2719 /*
2720 * At this point, the port either doesn't participate
2721 * in spanning tree or it is in the forwarding state.
2722 */
2723
2724 /*
2725 * If the packet is unicast, destined for someone on
2726 * "this" side of the bridge, drop it.
2727 */
2728 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2729 dst_if = bridge_rtlookup(sc, dst, vlan);
2730 if (src_if == dst_if)
2731 goto drop;
2732 } else {
2733 /*
2734 * Check if it's a reserved multicast address; any address
2735 * listed in 802.1D section 7.12.6 may not be forwarded by the
2736 * bridge.
2737 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
2738 */
2739 if (dst[0] == 0x01 && dst[1] == 0x80 &&
2740 dst[2] == 0xc2 && dst[3] == 0x00 &&
2741 dst[4] == 0x00 && dst[5] <= 0x0f)
2742 goto drop;
2743
2744 /* ...forward it to all interfaces. */
2745 if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2746 dst_if = NULL;
2747 }
2748
2749 /*
2750 * If we have a destination interface which is a member of our bridge,
2751 * OR this is a unicast packet, push it through the bpf(4) machinery.
2752 * For broadcast or multicast packets, don't bother because it will
2753 * be reinjected into ether_input. We do this before we pass the packets
2754 * through the pfil(9) framework, as it is possible that pfil(9) will
2755 * drop the packet, or possibly modify it, making it difficult to debug
2756 * firewall issues on the bridge.
2757 */
2758 if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2759 ETHER_BPF_MTAP(ifp, m);
2760
2761 /* run the packet filter */
2762 if (PFIL_HOOKED_IN_46) {
2763 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2764 return;
2765 if (m == NULL)
2766 return;
2767 }
2768
2769 if (dst_if == NULL) {
2770 bridge_broadcast(sc, src_if, m, 1);
2771 return;
2772 }
2773
2774 /*
2775 * At this point, we're dealing with a unicast frame
2776 * going to a different interface.
2777 */
2778 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2779 goto drop;
2780
2781 dbif = bridge_lookup_member_if(sc, dst_if);
2782 if (dbif == NULL)
2783 /* Not a member of the bridge (anymore?) */
2784 goto drop;
2785
2786 /* Private segments can not talk to each other */
2787 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2788 goto drop;
2789
2790 /* Do VLAN filtering. */
2791 if (!bridge_vfilter_out(dbif, m))
2792 goto drop;
2793
2794 if ((dbif->bif_flags & IFBIF_STP) &&
2795 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2796 goto drop;
2797
2798 if (PFIL_HOOKED_OUT_46) {
2799 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2800 return;
2801 if (m == NULL)
2802 return;
2803 }
2804
2805 bridge_enqueue(sc, dst_if, m, dbif);
2806 return;
2807
2808 drop:
2809 m_freem(m);
2810 }
2811
2812 /*
2813 * bridge_input:
2814 *
2815 * Receive input from a member interface. Queue the packet for
2816 * bridging if it is not for us.
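 *
 * Returns the mbuf when it should be passed up the stack of the
 * receiving interface, or NULL when the bridge has consumed it
 * (forwarded, delivered to the bridge interface itself, or dropped).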
2817 */
2818 static struct mbuf *
2819 bridge_input(struct ifnet *ifp, struct mbuf *m)
2820 {
2821 struct bridge_softc *sc = NULL;
2822 struct bridge_iflist *bif, *bif2;
2823 struct ifnet *bifp;
2824 struct ether_header *eh;
2825 struct mbuf *mc, *mc2;
2826 ether_vlanid_t vlan;
2827 int error;
2828
2829 NET_EPOCH_ASSERT();
2830
2831 eh = mtod(m, struct ether_header *);
2832 vlan = VLANTAGOF(m);
2833
2834 bif = ifp->if_bridge;
2835 if (bif)
2836 sc = bif->bif_sc;
2837
2838 if (sc == NULL) {
2839 /*
2840 * This packet originated from the bridge itself, so it must
2841 * have been transmitted by netmap. Derive the "source"
2842 * interface from the source address and drop the packet if the
2843 * source address isn't known.
2844 */
2845 KASSERT((m->m_flags & M_BRIDGE_INJECT) != 0,
2846 ("%s: ifnet %p missing a bridge softc", __func__, ifp));
2847 sc = if_getsoftc(ifp);
2848 ifp = bridge_rtlookup(sc, eh->ether_shost, vlan);
2849 if (ifp == NULL) {
2850 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2851 m_freem(m);
2852 return (NULL);
2853 }
2854 m->m_pkthdr.rcvif = ifp;
2855 }
2856 bifp = sc->sc_ifp;
2857 if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2858 return (m);
2859
2860 /*
2861 * Implement support for bridge monitoring. If this flag has been
2862 * set on this interface, discard the packet once we push it through
2863 * the bpf(4) machinery, but before we do, increment the byte and
2864 * packet counters associated with this interface.
2865 */
2866 if ((bifp->if_flags & IFF_MONITOR) != 0) {
2867 m->m_pkthdr.rcvif = bifp;
2868 ETHER_BPF_MTAP(bifp, m);
2869 if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2870 if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2871 m_freem(m);
2872 return (NULL);
2873 }
2874
2875 /* Do VLAN filtering. */
2876 if (!bridge_vfilter_in(bif, m)) {
2877 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2878 m_freem(m);
2879 return (NULL);
2880 }
2881 /* bridge_vfilter_in() may add a tag */
2882 vlan = VLANTAGOF(m);
2883
2884 bridge_span(sc, m);
2885
2886 if (m->m_flags & (M_BCAST|M_MCAST)) {
2887 /* Tap off 802.1D packets; they do not get forwarded. */
2888 if (memcmp(eh->ether_dhost, bstp_etheraddr,
2889 ETHER_ADDR_LEN) == 0) {
2890 bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2891 return (NULL);
2892 }
2893
2894 if ((bif->bif_flags & IFBIF_STP) &&
2895 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2896 return (m);
2897 }
2898
2899 /*
2900 * Make a deep copy of the packet and enqueue the copy
2901 * for bridge processing; return the original packet for
2902 * local processing.
2903 */
2904 mc = m_dup(m, M_NOWAIT);
2905 if (mc == NULL) {
2906 return (m);
2907 }
2908
2909 /* Perform the bridge forwarding function with the copy. */
2910 bridge_forward(sc, bif, mc);
2911
2912 #ifdef DEV_NETMAP
2913 /*
2914 * If netmap is enabled and has not already seen this packet,
2915 * then it will be consumed by bridge_forward().
2916 */
2917 if ((if_getcapenable(bifp) & IFCAP_NETMAP) != 0 &&
2918 (m->m_flags & M_BRIDGE_INJECT) == 0) {
2919 m_freem(m);
2920 return (NULL);
2921 }
2922 #endif
2923
2924 /*
2925 * Reinject the mbuf as arriving on the bridge so we have a
2926 * chance at claiming multicast packets. We can not loop back
2927 * here from ether_input as a bridge is never a member of a
2928 * bridge.
2929 */
2930 KASSERT(bifp->if_bridge == NULL,
2931 ("loop created in bridge_input"));
2932 mc2 = m_dup(m, M_NOWAIT);
2933 if (mc2 != NULL) {
2934 /* Keep the layer3 header aligned */
2935 int i = min(mc2->m_pkthdr.len, max_protohdr);
2936 mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2937 }
2938 if (mc2 != NULL) {
2939 mc2->m_pkthdr.rcvif = bifp;
2940 mc2->m_flags &= ~M_BRIDGE_INJECT;
2941 sc->sc_if_input(bifp, mc2);
2942 }
2943
2944 /* Return the original packet for local processing. */
2945 return (m);
2946 }
2947
2948 if ((bif->bif_flags & IFBIF_STP) &&
2949 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2950 return (m);
2951 }
2952
2953 #if defined(INET) || defined(INET6)
2954 #define CARP_CHECK_WE_ARE_DST(iface) \
2955 ((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_dhost))
2956 #define CARP_CHECK_WE_ARE_SRC(iface) \
2957 ((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_shost))
2958 #else
2959 #define CARP_CHECK_WE_ARE_DST(iface) false
2960 #define CARP_CHECK_WE_ARE_SRC(iface) false
2961 #endif
2962
2963 #ifdef DEV_NETMAP
2964 #define GRAB_FOR_NETMAP(ifp, m) do { \
2965 if ((if_getcapenable(ifp) & IFCAP_NETMAP) != 0 && \
2966 ((m)->m_flags & M_BRIDGE_INJECT) == 0) { \
2967 (ifp)->if_input(ifp, m); \
2968 return (NULL); \
2969 } \
2970 } while (0)
2971 #else
2972 #define GRAB_FOR_NETMAP(ifp, m)
2973 #endif
2974
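/*
 * GRAB_OUR_PACKETS(iface) is expanded below for the bridge interface
 * itself and, subject to the checks that follow, for the arrival
 * interface and the other members.  If the frame is addressed to that
 * interface's MAC address (or one of its CARP addresses), it is
 * delivered locally and bridge_input() returns; if the frame carries
 * that interface's MAC as its source, it is dropped.  Otherwise
 * control falls through to the next check.
 */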
2975 #define GRAB_OUR_PACKETS(iface) \
2976 if ((iface)->if_type == IFT_GIF) \
2977 continue; \
2978 /* It is destined for us. */ \
2979 if (memcmp(IF_LLADDR(iface), eh->ether_dhost, ETHER_ADDR_LEN) == 0 || \
2980 CARP_CHECK_WE_ARE_DST(iface)) { \
2981 if (bif->bif_flags & IFBIF_LEARNING) { \
2982 error = bridge_rtupdate(sc, eh->ether_shost, \
2983 vlan, bif, 0, IFBAF_DYNAMIC); \
2984 if (error && bif->bif_addrmax) { \
2985 m_freem(m); \
2986 return (NULL); \
2987 } \
2988 } \
2989 m->m_pkthdr.rcvif = iface; \
2990 if ((iface) == ifp) { \
2991 /* Skip bridge processing... src == dest */ \
2992 return (m); \
2993 } \
2994 /* It's passing over or to the bridge, locally. */ \
2995 ETHER_BPF_MTAP(bifp, m); \
2996 if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1); \
2997 if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);\
2998 /* Hand the packet over to netmap if necessary. */ \
2999 GRAB_FOR_NETMAP(bifp, m); \
3000 /* Filter on the physical interface. */ \
3001 if (V_pfil_local_phys && PFIL_HOOKED_IN_46) { \
3002 if (bridge_pfil(&m, NULL, ifp, \
3003 PFIL_IN) != 0 || m == NULL) { \
3004 return (NULL); \
3005 } \
3006 } \
3007 if ((iface) != bifp) \
3008 ETHER_BPF_MTAP(iface, m); \
3009 /* Pass tagged packets to if_vlan, if it's loaded */ \
3010 if (VLANTAGOF(m) != 0) { \
3011 if (bifp->if_vlantrunk == NULL) { \
3012 m_freem(m); \
3013 return (NULL); \
3014 } \
3015 (*vlan_input_p)(bifp, m); \
3016 return (NULL); \
3017 } \
3018 return (m); \
3019 } \
3020 \
3021 /* We just received a packet that we sent out. */ \
3022 if (memcmp(IF_LLADDR(iface), eh->ether_shost, ETHER_ADDR_LEN) == 0 || \
3023 CARP_CHECK_WE_ARE_SRC(iface)) { \
3024 m_freem(m); \
3025 return (NULL); \
3026 }
3027
3028 /*
3029 * Unicast. Make sure it's not for the bridge.
3030 */
3031 do { GRAB_OUR_PACKETS(bifp) } while (0);
3032
3033 /*
3034 * Check the interface the packet arrived on. For tagged frames,
3035 * we need to do this even if member_ifaddrs is disabled because
3036 * vlan(4) might need to handle the traffic.
3037 */
3038 if (V_member_ifaddrs || (vlan && ifp->if_vlantrunk))
3039 do { GRAB_OUR_PACKETS(ifp) } while (0);
3040
3041 /*
3042 * We only need to check the other member interfaces if member_ifaddrs
3043 * is enabled; otherwise we should never have traffic destined for
3044 * a member's lladdr.
3045 */
3046 if (V_member_ifaddrs) {
3047 CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
3048 GRAB_OUR_PACKETS(bif2->bif_ifp)
3049 }
3050 }
3051
3052 #undef CARP_CHECK_WE_ARE_DST
3053 #undef CARP_CHECK_WE_ARE_SRC
3054 #undef GRAB_FOR_NETMAP
3055 #undef GRAB_OUR_PACKETS
3056
3057 /* Perform the bridge forwarding function. */
3058 bridge_forward(sc, bif, m);
3059
3060 return (NULL);
3061 }
3062
3063 /*
3064 * Inject a packet back into the host ethernet stack. This will generally only
3065 * be used by netmap when an application writes to the host TX ring. The
3066 * M_BRIDGE_INJECT flag ensures that the packet is re-routed to the bridge
3067 * interface after ethernet processing.
3068 */
3069 static void
3070 bridge_inject(struct ifnet *ifp, struct mbuf *m)
3071 {
3072 struct bridge_softc *sc;
3073
3074 if (ifp->if_type == IFT_L2VLAN) {
3075 /*
3076 * vlan(4) gives us the vlan ifnet, so we need to get the
3077 * bridge softc to get a pointer to ether_input to send the
3078 * packet to.
3079 */
3080 struct ifnet *bifp = NULL;
3081
3082 if (vlan_trunkdev_p == NULL) {
3083 m_freem(m);
3084 return;
3085 }
3086
3087 bifp = vlan_trunkdev_p(ifp);
3088 if (bifp == NULL) {
3089 m_freem(m);
3090 return;
3091 }
3092
3093 sc = if_getsoftc(bifp);
3094 sc->sc_if_input(ifp, m);
3095 return;
3096 }
3097
3098 KASSERT((if_getcapenable(ifp) & IFCAP_NETMAP) != 0,
3099 ("%s: iface %s is not running in netmap mode",
3100 __func__, if_name(ifp)));
3101 KASSERT((m->m_flags & M_BRIDGE_INJECT) == 0,
3102 ("%s: mbuf %p has M_BRIDGE_INJECT set", __func__, m));
3103
3104 m->m_flags |= M_BRIDGE_INJECT;
3105 sc = if_getsoftc(ifp);
3106 sc->sc_if_input(ifp, m);
3107 }
3108
3109 /*
3110 * bridge_broadcast:
3111 *
3112 * Send a frame to all interfaces that are members of
3113 * the bridge, except for the one on which the packet
3114 * arrived.
3115 *
3116 * NOTE: Releases the lock on return.
3117 */
3118 static void
3119 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
3120 struct mbuf *m, int runfilt)
3121 {
3122 struct bridge_iflist *dbif, *sbif;
3123 struct mbuf *mc;
3124 struct ifnet *dst_if;
3125 int used = 0, i;
3126
3127 NET_EPOCH_ASSERT();
3128
3129 sbif = bridge_lookup_member_if(sc, src_if);
3130
3131 /* Filter on the bridge interface before broadcasting */
3132 if (runfilt && PFIL_HOOKED_OUT_46) {
3133 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
3134 return;
3135 if (m == NULL)
3136 return;
3137 }
3138
3139 CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
3140 dst_if = dbif->bif_ifp;
3141 if (dst_if == src_if)
3142 continue;
3143
3144 /* Private segments can not talk to each other */
3145 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
3146 continue;
3147
3148 /* Do VLAN filtering. */
3149 if (!bridge_vfilter_out(dbif, m))
3150 continue;
3151
3152 if ((dbif->bif_flags & IFBIF_STP) &&
3153 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3154 continue;
3155
3156 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
3157 (m->m_flags & (M_BCAST|M_MCAST)) == 0)
3158 continue;
3159
3160 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
3161 continue;
3162
3163 if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
3164 mc = m;
3165 used = 1;
3166 } else {
3167 mc = m_dup(m, M_NOWAIT);
3168 if (mc == NULL) {
3169 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3170 continue;
3171 }
3172 }
3173
3174 /*
3175 * Filter on the output interface. Pass a NULL bridge interface
3176 * pointer so we do not redundantly filter on the bridge for
3177 * each interface we broadcast on.
3178 */
3179 if (runfilt && PFIL_HOOKED_OUT_46) {
3180 if (used == 0) {
3181 /* Keep the layer3 header aligned */
3182 i = min(mc->m_pkthdr.len, max_protohdr);
3183 mc = m_copyup(mc, i, ETHER_ALIGN);
3184 if (mc == NULL) {
3185 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3186 continue;
3187 }
3188 }
3189 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
3190 continue;
3191 if (mc == NULL)
3192 continue;
3193 }
3194
3195 bridge_enqueue(sc, dst_if, mc, dbif);
3196 }
3197 if (used == 0)
3198 m_freem(m);
3199 }
3200
3201 /*
3202 * bridge_span:
3203 *
3204 * Duplicate a packet out one or more interfaces that are in span mode;
3205 * the original mbuf is unmodified.
3206 */
3207 static void
3208 bridge_span(struct bridge_softc *sc, struct mbuf *m)
3209 {
3210 struct bridge_iflist *bif;
3211 struct ifnet *dst_if;
3212 struct mbuf *mc;
3213
3214 NET_EPOCH_ASSERT();
3215
3216 if (CK_LIST_EMPTY(&sc->sc_spanlist))
3217 return;
3218
3219 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
3220 dst_if = bif->bif_ifp;
3221
3222 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
3223 continue;
3224
3225 mc = m_dup(m, M_NOWAIT);
3226 if (mc == NULL) {
3227 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3228 continue;
3229 }
3230
3231 bridge_enqueue(sc, dst_if, mc, bif);
3232 }
3233 }
3234
3235 /*
3236 * Incoming VLAN filtering. Given a frame and the member interface it was
3237 * received on, decide whether the port configuration allows it.
3238 */
3239 static bool
3240 bridge_vfilter_in(const struct bridge_iflist *sbif, struct mbuf *m)
3241 {
3242 ether_vlanid_t vlan;
3243
3244 vlan = VLANTAGOF(m);
3245 /* Make sure the vlan id is reasonable. */
3246 if (vlan > DOT1Q_VID_MAX)
3247 return (false);
3248
3249 /* If VLAN filtering isn't enabled, pass everything. */
3250 if ((sbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0)
3251 return (true);
3252
3253 if (vlan == DOT1Q_VID_NULL) {
3254 /*
3255 * The frame doesn't have a tag. If the interface does not
3256 * have an untagged vlan configured, drop the frame.
3257 */
3258 if (sbif->bif_pvid == DOT1Q_VID_NULL)
3259 return (false);
3260
3261 /*
3262 * Otherwise, insert a new tag based on the interface's
3263 * untagged vlan id.
3264 */
3265 m->m_pkthdr.ether_vtag = sbif->bif_pvid;
3266 m->m_flags |= M_VLANTAG;
3267 } else {
3268 /*
3269 * The frame has a tag, so check it matches the interface's
3270 * vlan access list. We explicitly do not accept tagged
3271 * frames for the untagged vlan id here (unless it's also
3272 * in the access list).
3273 */
3274 if (!BRVLAN_TEST(&sbif->bif_vlan_set, vlan))
3275 return (false);
3276 }
3277
3278 /* Accept the frame. */
3279 return (true);
3280 }
3281
3282 /*
3283 * Outgoing VLAN filtering. Given a frame, its vlan, and the member interface
3284 * we intend to send it to, decide whether the port configuration allows it to
3285 * be sent.
3286 */
3287 static bool
3288 bridge_vfilter_out(const struct bridge_iflist *dbif, const struct mbuf *m)
3289 {
3290 struct ether_header *eh;
3291 ether_vlanid_t vlan;
3292
3293 NET_EPOCH_ASSERT();
3294
3295 /* If VLAN filtering isn't enabled, pass everything. */
3296 if ((dbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0)
3297 return (true);
3298
3299 vlan = VLANTAGOF(m);
3300
3301 /*
3302 * Always allow untagged 802.1D STP frames, even if they would
3303 * otherwise be dropped. This is required for STP to work on
3304 * a filtering bridge.
3305 *
3306 * Tagged STP (Cisco PVST+) is a non-standard extension, so
3307 * handle those frames via the normal filtering path.
3308 */
3309 eh = mtod(m, struct ether_header *);
3310 if (vlan == DOT1Q_VID_NULL &&
3311 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0)
3312 return (true);
3313
3314 /*
3315 * If the frame wasn't assigned to a vlan at ingress, drop it.
3316 * We can't forward these frames to filtering ports because we
3317 * don't know what VLAN they're supposed to be in.
3318 */
3319 if (vlan == DOT1Q_VID_NULL)
3320 return (false);
3321
3322 /*
3323 * If the frame's vlan matches the interface's untagged vlan,
3324 * allow it.
3325 */
3326 if (vlan == dbif->bif_pvid)
3327 return (true);
3328
3329 /*
3330 * If the frame's vlan is on the interface's tagged access list,
3331 * allow it.
3332 */
3333 if (BRVLAN_TEST(&dbif->bif_vlan_set, vlan))
3334 return (true);
3335
3336 /* The frame was not permitted, so drop it. */
3337 return (false);
3338 }
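/*
 * Worked example, assuming VLAN filtering is enabled on the bridge and
 * a member is configured with an untagged vlan (pvid) of 10 and a
 * tagged access list of {20}: an untagged frame is assigned vlan 10 on
 * ingress and is sent untagged when forwarded back out such a member;
 * a frame tagged 20 is accepted and forwarded with its tag; a frame
 * tagged 30 is rejected both on ingress and on egress.
 */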
3339
3340 /*
3341 * bridge_rtupdate:
3342 *
3343 * Add a bridge routing entry.
3344 */
3345 static int
3346 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
3347 ether_vlanid_t vlan, struct bridge_iflist *bif,
3348 int setflags, uint8_t flags)
3349 {
3350 struct bridge_rtnode *brt;
3351 struct bridge_iflist *obif;
3352 int error;
3353
3354 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
3355
3356 /* Check the source address is valid and not multicast. */
3357 if (ETHER_IS_MULTICAST(dst))
3358 return (EXTERROR(EINVAL, "Multicast address not permitted"));
3359 if (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
3360 dst[3] == 0 && dst[4] == 0 && dst[5] == 0)
3361 return (EXTERROR(EINVAL, "Zero address not permitted"));
3362
3363 /*
3364 * A route for this destination might already exist. If so,
3365 * update it, otherwise create a new one.
3366 */
3367 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
3368 BRIDGE_RT_LOCK(sc);
3369
3370 /* Check again, now that we have the lock. There could have
3371 * been a race and we only want to insert this once. */
3372 if (bridge_rtnode_lookup(sc, dst, vlan) != NULL) {
3373 BRIDGE_RT_UNLOCK(sc);
3374 return (0);
3375 }
3376
3377 if (sc->sc_brtcnt >= sc->sc_brtmax) {
3378 sc->sc_brtexceeded++;
3379 BRIDGE_RT_UNLOCK(sc);
3380 return (EXTERROR(ENOSPC, "Address table is full"));
3381 }
3382 /* Check per interface address limits (if enabled) */
3383 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
3384 bif->bif_addrexceeded++;
3385 BRIDGE_RT_UNLOCK(sc);
3386 return (EXTERROR(ENOSPC,
3387 "Interface address limit exceeded"));
3388 }
3389
3390 /*
3391 * Allocate a new bridge forwarding node, and
3392 * initialize the expiration time and Ethernet
3393 * address.
3394 */
3395 brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
3396 if (brt == NULL) {
3397 BRIDGE_RT_UNLOCK(sc);
3398 return (EXTERROR(ENOMEM,
3399 "Cannot allocate address node"));
3400 }
3401 brt->brt_vnet = curvnet;
3402
3403 if (bif->bif_flags & IFBIF_STICKY)
3404 brt->brt_flags = IFBAF_STICKY;
3405 else
3406 brt->brt_flags = IFBAF_DYNAMIC;
3407
3408 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
3409 brt->brt_vlan = vlan;
3410
3411 brt->brt_dst = bif;
3412 if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
3413 uma_zfree(V_bridge_rtnode_zone, brt);
3414 BRIDGE_RT_UNLOCK(sc);
3415 return (error);
3416 }
3417 bif->bif_addrcnt++;
3418
3419 BRIDGE_RT_UNLOCK(sc);
3420 }
3421
3422 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3423 (obif = brt->brt_dst) != bif) {
3424 MPASS(obif != NULL);
3425
3426 BRIDGE_RT_LOCK(sc);
3427 brt->brt_dst->bif_addrcnt--;
3428 brt->brt_dst = bif;
3429 brt->brt_dst->bif_addrcnt++;
3430 BRIDGE_RT_UNLOCK(sc);
3431
3432 if (V_log_mac_flap &&
3433 ppsratecheck(&V_log_last, &V_log_count, V_log_interval)) {
3434 log(LOG_NOTICE,
3435 "%s: mac address %6D vlan %d moved from %s to %s\n",
3436 sc->sc_ifp->if_xname,
3437 &brt->brt_addr[0], ":",
3438 brt->brt_vlan,
3439 obif->bif_ifp->if_xname,
3440 bif->bif_ifp->if_xname);
3441 }
3442 }
3443
3444 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3445 brt->brt_expire = time_uptime + sc->sc_brttimeout;
3446 if (setflags)
3447 brt->brt_flags = flags;
3448
3449 return (0);
3450 }
3451
3452 /*
3453 * bridge_rtlookup:
3454 *
3455 * Lookup the destination interface for an address.
3456 */
3457 static struct ifnet *
3458 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr,
3459 ether_vlanid_t vlan)
3460 {
3461 struct bridge_rtnode *brt;
3462
3463 NET_EPOCH_ASSERT();
3464
3465 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
3466 return (NULL);
3467
3468 return (brt->brt_ifp);
3469 }
3470
3471 /*
3472 * bridge_rttrim:
3473 *
3474 * Trim the routing table so that the number of
3475 * routing entries is less than or equal to the
3476 * maximum.
3477 */
3478 static void
3479 bridge_rttrim(struct bridge_softc *sc)
3480 {
3481 struct bridge_rtnode *brt, *nbrt;
3482
3483 NET_EPOCH_ASSERT();
3484 BRIDGE_RT_LOCK_ASSERT(sc);
3485
3486 /* Make sure we actually need to do this. */
3487 if (sc->sc_brtcnt <= sc->sc_brtmax)
3488 return;
3489
3490 /* Force an aging cycle; this might trim enough addresses. */
3491 bridge_rtage(sc);
3492 if (sc->sc_brtcnt <= sc->sc_brtmax)
3493 return;
3494
3495 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3496 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3497 bridge_rtnode_destroy(sc, brt);
3498 if (sc->sc_brtcnt <= sc->sc_brtmax)
3499 return;
3500 }
3501 }
3502 }
3503
3504 /*
3505 * bridge_timer:
3506 *
3507 * Aging timer for the bridge.
3508 */
3509 static void
3510 bridge_timer(void *arg)
3511 {
3512 struct bridge_softc *sc = arg;
3513
3514 BRIDGE_RT_LOCK_ASSERT(sc);
3515
3516 /* Destruction of rtnodes requires a proper vnet context */
3517 CURVNET_SET(sc->sc_ifp->if_vnet);
3518 bridge_rtage(sc);
3519
3520 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
3521 callout_reset(&sc->sc_brcallout,
3522 bridge_rtable_prune_period * hz, bridge_timer, sc);
3523 CURVNET_RESTORE();
3524 }
3525
3526 /*
3527 * bridge_rtage:
3528 *
3529 * Perform an aging cycle.
3530 */
3531 static void
3532 bridge_rtage(struct bridge_softc *sc)
3533 {
3534 struct bridge_rtnode *brt, *nbrt;
3535
3536 BRIDGE_RT_LOCK_ASSERT(sc);
3537
3538 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3539 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3540 if (time_uptime >= brt->brt_expire)
3541 bridge_rtnode_destroy(sc, brt);
3542 }
3543 }
3544 }
3545
3546 /*
3547 * bridge_rtflush:
3548 *
3549 * Remove all dynamic addresses from the bridge.
3550 */
3551 static void
3552 bridge_rtflush(struct bridge_softc *sc, int full)
3553 {
3554 struct bridge_rtnode *brt, *nbrt;
3555
3556 BRIDGE_RT_LOCK_ASSERT(sc);
3557
3558 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3559 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3560 bridge_rtnode_destroy(sc, brt);
3561 }
3562 }
3563
3564 /*
3565 * bridge_rtdaddr:
3566 *
3567 * Remove an address from the table.
3568 */
3569 static int
3570 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr,
3571 ether_vlanid_t vlan)
3572 {
3573 struct bridge_rtnode *brt;
3574 int found = 0;
3575
3576 BRIDGE_RT_LOCK(sc);
3577
3578 /*
3579 * If vlan is DOT1Q_VID_RSVD_IMPL then we want to delete for all vlans
3580 * so the lookup may return more than one.
3581 */
3582 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
3583 bridge_rtnode_destroy(sc, brt);
3584 found = 1;
3585 }
3586
3587 BRIDGE_RT_UNLOCK(sc);
3588
3589 return (found ? 0 : ENOENT);
3590 }
3591
3592 /*
3593 * bridge_rtdelete:
3594 *
3595 * Delete routes to a specific member interface.
3596 */
3597 static void
3598 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
3599 {
3600 struct bridge_rtnode *brt, *nbrt;
3601
3602 BRIDGE_RT_LOCK_ASSERT(sc);
3603
3604 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3605 if (brt->brt_ifp == ifp && (full ||
3606 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
3607 bridge_rtnode_destroy(sc, brt);
3608 }
3609 }
3610
3611 /*
3612 * bridge_rtable_init:
3613 *
3614 * Initialize the route table for this bridge.
3615 */
3616 static void
3617 bridge_rtable_init(struct bridge_softc *sc)
3618 {
3619 int i;
3620
3621 sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
3622 M_DEVBUF, M_WAITOK);
3623
3624 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
3625 CK_LIST_INIT(&sc->sc_rthash[i]);
3626
3627 sc->sc_rthash_key = arc4random();
3628 CK_LIST_INIT(&sc->sc_rtlist);
3629 }
3630
3631 /*
3632 * bridge_rtable_fini:
3633 *
3634 * Deconstruct the route table for this bridge.
3635 */
3636 static void
3637 bridge_rtable_fini(struct bridge_softc *sc)
3638 {
3639
3640 KASSERT(sc->sc_brtcnt == 0,
3641 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
3642 free(sc->sc_rthash, M_DEVBUF);
3643 }
3644
3645 /*
3646 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3647 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
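 *
 * The third seed word is taken from sc_rthash_key, a per-bridge random
 * value chosen in bridge_rtable_init(), so the distribution of entries
 * across hash chains cannot be predicted from MAC addresses alone.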
3648 */
3649 #define mix(a, b, c) \
3650 do { \
3651 a -= b; a -= c; a ^= (c >> 13); \
3652 b -= c; b -= a; b ^= (a << 8); \
3653 c -= a; c -= b; c ^= (b >> 13); \
3654 a -= b; a -= c; a ^= (c >> 12); \
3655 b -= c; b -= a; b ^= (a << 16); \
3656 c -= a; c -= b; c ^= (b >> 5); \
3657 a -= b; a -= c; a ^= (c >> 3); \
3658 b -= c; b -= a; b ^= (a << 10); \
3659 c -= a; c -= b; c ^= (b >> 15); \
3660 } while (/*CONSTCOND*/0)
3661
3662 static __inline uint32_t
3663 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3664 {
3665 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3666
3667 b += addr[5] << 8;
3668 b += addr[4];
3669 a += addr[3] << 24;
3670 a += addr[2] << 16;
3671 a += addr[1] << 8;
3672 a += addr[0];
3673
3674 mix(a, b, c);
3675
3676 return (c & BRIDGE_RTHASH_MASK);
3677 }
3678
3679 #undef mix
3680
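/*
 * bridge_rtnode_addr_cmp:
 *
 * Compare two Ethernet addresses byte by byte and return a
 * memcmp()-style ordering.  Hash chains are kept sorted on this
 * ordering, which lets bridge_rtnode_lookup() and
 * bridge_rtnode_insert() stop early once a larger address is seen.
 */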
3681 static int
3682 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3683 {
3684 int i, d;
3685
3686 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3687 d = ((int)a[i]) - ((int)b[i]);
3688 }
3689
3690 return (d);
3691 }
3692
3693 /*
3694 * bridge_rtnode_lookup:
3695 *
3696 * Look up a bridge route node for the specified destination. Compare the
3697 * vlan id; the DOT1Q_VID_RSVD_IMPL wildcard returns the first address match.
3698 */
3699 static struct bridge_rtnode *
3700 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
3701 ether_vlanid_t vlan)
3702 {
3703 struct bridge_rtnode *brt;
3704 uint32_t hash;
3705 int dir;
3706
3707 BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);
3708
3709 hash = bridge_rthash(sc, addr);
3710 CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
3711 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3712 if (dir == 0 && (brt->brt_vlan == vlan || vlan == DOT1Q_VID_RSVD_IMPL))
3713 return (brt);
3714 if (dir > 0)
3715 return (NULL);
3716 }
3717
3718 return (NULL);
3719 }
3720
3721 /*
3722 * bridge_rtnode_insert:
3723 *
3724 * Insert the specified bridge node into the route table. We
3725 * assume the entry is not already in the table.
3726 */
3727 static int
3728 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3729 {
3730 struct bridge_rtnode *lbrt;
3731 uint32_t hash;
3732 int dir;
3733
3734 BRIDGE_RT_LOCK_ASSERT(sc);
3735
3736 hash = bridge_rthash(sc, brt->brt_addr);
3737
3738 lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
3739 if (lbrt == NULL) {
3740 CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
3741 goto out;
3742 }
3743
3744 do {
3745 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3746 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
3747 return (EXTERROR(EEXIST, "Address already exists"));
3748 if (dir > 0) {
3749 CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3750 goto out;
3751 }
3752 if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
3753 CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3754 goto out;
3755 }
3756 lbrt = CK_LIST_NEXT(lbrt, brt_hash);
3757 } while (lbrt != NULL);
3758
3759 #ifdef DIAGNOSTIC
3760 panic("bridge_rtnode_insert: impossible");
3761 #endif
3762
3763 out:
3764 CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3765 sc->sc_brtcnt++;
3766
3767 return (0);
3768 }
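
#if 0
/*
 * Illustrative sketch only (not compiled): the allocate-then-insert
 * pattern a caller such as bridge_rtupdate() is expected to follow.
 * The helper name and parameters are hypothetical and the field
 * initialisation is abbreviated; the point is that a duplicate insert
 * fails with EEXIST and the caller must free the unused node itself.
 */
static int
bridge_rtnode_insert_example(struct bridge_softc *sc,
    struct bridge_iflist *bif, const uint8_t *dst, ether_vlanid_t vlan)
{
	struct bridge_rtnode *brt;
	int error;

	BRIDGE_RT_LOCK_ASSERT(sc);

	brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
	if (brt == NULL)
		return (ENOMEM);
	brt->brt_vnet = curvnet;
	brt->brt_flags = IFBAF_DYNAMIC;
	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
	brt->brt_vlan = vlan;
	brt->brt_dst = bif;

	if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
		/* Typically EEXIST: an entry for this address/vlan exists. */
		uma_zfree(V_bridge_rtnode_zone, brt);
		return (error);
	}
	bif->bif_addrcnt++;
	return (0);
}
#endif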
3769
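/*
 * bridge_rtnode_destroy_cb:
 *
 *	Epoch(9) callback that performs the deferred free of a route node
 *	once no network-epoch readers can still reference it.
 */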
3770 static void
3771 bridge_rtnode_destroy_cb(struct epoch_context *ctx)
3772 {
3773 struct bridge_rtnode *brt;
3774
3775 brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
3776
3777 CURVNET_SET(brt->brt_vnet);
3778 uma_zfree(V_bridge_rtnode_zone, brt);
3779 CURVNET_RESTORE();
3780 }
3781
3782 /*
3783 * bridge_rtnode_destroy:
3784 *
3785 * Destroy a bridge rtnode.
3786 */
3787 static void
3788 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3789 {
3790 BRIDGE_RT_LOCK_ASSERT(sc);
3791
3792 CK_LIST_REMOVE(brt, brt_hash);
3793
3794 CK_LIST_REMOVE(brt, brt_list);
3795 sc->sc_brtcnt--;
3796 brt->brt_dst->bif_addrcnt--;
3797
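	/*
	 * Defer freeing the node until all current network-epoch readers
	 * have drained, since lookups walk the hash chains under the net
	 * epoch without taking the RT lock.
	 */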
3798 NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
3799 }
3800
3801 /*
3802 * bridge_rtable_expire:
3803 *
3804 * Set the expiry time for all routes on an interface.
3805 */
3806 static void
3807 bridge_rtable_expire(struct ifnet *ifp, int age)
3808 {
3809 struct bridge_iflist *bif = NULL;
3810 struct bridge_softc *sc = NULL;
3811 struct bridge_rtnode *brt;
3812
3813 CURVNET_SET(ifp->if_vnet);
3814
3815 bif = ifp->if_bridge;
3816 if (bif)
3817 sc = bif->bif_sc;
3818 MPASS(sc != NULL);
3819 BRIDGE_RT_LOCK(sc);
3820
3821 /*
3822 * If the age is zero then flush the interface's dynamic entries;
3823 * otherwise cap their expiry times at 'age' seconds from now.
3824 */
3825 if (age == 0)
3826 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3827 else {
3828 CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3829 /* Cap the expiry time to 'age' */
3830 if (brt->brt_ifp == ifp &&
3831 brt->brt_expire > time_uptime + age &&
3832 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3833 brt->brt_expire = time_uptime + age;
3834 }
3835 }
3836 BRIDGE_RT_UNLOCK(sc);
3837 CURVNET_RESTORE();
3838 }
3839
3840 /*
3841 * bridge_state_change:
3842 *
3843 * Callback from the bridgestp code when a port changes states.
3844 */
3845 static void
3846 bridge_state_change(struct ifnet *ifp, int state)
3847 {
3848 struct bridge_iflist *bif = ifp->if_bridge;
3849 struct bridge_softc *sc = bif->bif_sc;
3850 static const char *stpstates[] = {
3851 "disabled",
3852 "listening",
3853 "learning",
3854 "forwarding",
3855 "blocking",
3856 "discarding"
3857 };
3858
3859 CURVNET_SET(ifp->if_vnet);
3860 if (V_log_stp)
3861 log(LOG_NOTICE, "%s: state changed to %s on %s\n",
3862 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
3863 CURVNET_RESTORE();
3864 }
3865
3866 /*
3867 * Send bridge packets through pfil if they are one of the types pfil can deal
3868 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
3869 * question.)  If bifp or ifp is NULL then packet filtering is skipped for
3870 * that interface.
3871 */
3872 static int
3873 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3874 {
3875 int snap, error, i;
3876 struct ether_header *eh1, eh2;
3877 struct llc llc1;
3878 u_int16_t ether_type;
3879 pfil_return_t rv;
3880 #ifdef INET
3881 struct ip *ip = NULL;
3882 int hlen = 0;
3883 #endif
3884
3885 snap = 0;
3886 error = -1; /* Default to an error unless explicitly set to 0 below */
3887
3888 #if 0
3889 /* we may return with the IP fields swapped, ensure it's not shared */
3890 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
3891 #endif
3892
3893 if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
3894 return (0); /* filtering is disabled */
3895
3896 i = min((*mp)->m_pkthdr.len, max_protohdr);
3897 if ((*mp)->m_len < i) {
3898 *mp = m_pullup(*mp, i);
3899 if (*mp == NULL) {
3900 printf("%s: m_pullup failed\n", __func__);
3901 return (-1);
3902 }
3903 }
3904
3905 eh1 = mtod(*mp, struct ether_header *);
3906 ether_type = ntohs(eh1->ether_type);
3907
3908 /*
3909 * Check for SNAP/LLC.
3910 */
3911 if (ether_type < ETHERMTU) {
3912 struct llc *llc2 = (struct llc *)(eh1 + 1);
3913
3914 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3915 llc2->llc_dsap == LLC_SNAP_LSAP &&
3916 llc2->llc_ssap == LLC_SNAP_LSAP &&
3917 llc2->llc_control == LLC_UI) {
3918 ether_type = htons(llc2->llc_un.type_snap.ether_type);
3919 snap = 1;
3920 }
3921 }
3922
3923 /*
3924 * If we're trying to filter bridge traffic, only look at traffic for
3925 * protocols available in the kernel (IPv4 and/or IPv6) to avoid
3926 * passing traffic for an unsupported protocol to the filter. This is
3927 * lame since if we really wanted, say, an AppleTalk filter, we are
3928 * hosed, but of course we don't have an AppleTalk filter to begin
3929 * with. (Note that since pfil doesn't understand ARP it will pass
3930 * *ALL* ARP traffic.)
3931 */
3932 switch (ether_type) {
3933 #ifdef INET
3934 case ETHERTYPE_ARP:
3935 case ETHERTYPE_REVARP:
3936 if (V_pfil_ipfw_arp == 0)
3937 return (0); /* Automatically pass */
3938
3939 /* FALLTHROUGH */
3940 case ETHERTYPE_IP:
3941 #endif
3942 #ifdef INET6
3943 case ETHERTYPE_IPV6:
3944 #endif /* INET6 */
3945 break;
3946
3947 default:
3948 /*
3949 * We get here if the packet isn't from a supported
3950 * protocol.  Check whether the user wants to pass
3951 * non-IP packets; pfil(9) cannot inspect them, so they
3952 * would be passed unconditionally, and the default is
3953 * therefore to drop them.
3954 */
3955 if (V_pfil_onlyip)
3956 goto bad;
3957 }
3958
3959 /* Run the packet through pfil before stripping link headers */
3960 if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
3961 dir == PFIL_OUT && ifp != NULL) {
3962 switch (pfil_mbuf_out(V_link_pfil_head, mp, ifp, NULL)) {
3963 case PFIL_DROPPED:
3964 return (EACCES);
3965 case PFIL_CONSUMED:
3966 return (0);
3967 }
3968 }
3969
3970 /* Strip off the Ethernet header and keep a copy. */
3971 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
3972 m_adj(*mp, ETHER_HDR_LEN);
3973
3974 /* Strip off snap header, if present */
3975 if (snap) {
3976 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
3977 m_adj(*mp, sizeof(struct llc));
3978 }
3979
3980 /*
3981 * Check the IP header for alignment and errors
3982 */
3983 if (dir == PFIL_IN) {
3984 switch (ether_type) {
3985 #ifdef INET
3986 case ETHERTYPE_IP:
3987 error = bridge_ip_checkbasic(mp);
3988 break;
3989 #endif
3990 #ifdef INET6
3991 case ETHERTYPE_IPV6:
3992 error = bridge_ip6_checkbasic(mp);
3993 break;
3994 #endif /* INET6 */
3995 default:
3996 error = 0;
3997 }
3998 if (error)
3999 goto bad;
4000 }
4001
4002 error = 0;
4003
4004 /*
4005 * Run the packet through pfil
4006 */
4007 rv = PFIL_PASS;
4008 switch (ether_type) {
4009 #ifdef INET
4010 case ETHERTYPE_IP:
4011 /*
4012 * Run pfil on the member interface and the bridge, both can
4013 * be skipped by clearing pfil_member or pfil_bridge.
4014 *
4015 * Keep the order:
4016 * in_if -> bridge_if -> out_if
4017 */
4018 if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
4019 pfil_mbuf_out(V_inet_pfil_head, mp, bifp, NULL)) !=
4020 PFIL_PASS)
4021 break;
4022
4023 if (V_pfil_member && ifp != NULL) {
4024 rv = (dir == PFIL_OUT) ?
4025 pfil_mbuf_out(V_inet_pfil_head, mp, ifp, NULL) :
4026 pfil_mbuf_in(V_inet_pfil_head, mp, ifp, NULL);
4027 if (rv != PFIL_PASS)
4028 break;
4029 }
4030
4031 if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
4032 pfil_mbuf_in(V_inet_pfil_head, mp, bifp, NULL)) !=
4033 PFIL_PASS)
4034 break;
4035
4036 /* Check if we need to fragment the packet. */
4037 /* bridge_fragment() generates an mbuf chain of packets */
4038 /* that already include Ethernet headers. */
4039 if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
4040 i = (*mp)->m_pkthdr.len;
4041 if (i > ifp->if_mtu) {
4042 error = bridge_fragment(ifp, mp, &eh2, snap,
4043 &llc1);
4044 return (error);
4045 }
4046 }
4047
4048 /* Recalculate the ip checksum. */
4049 ip = mtod(*mp, struct ip *);
4050 hlen = ip->ip_hl << 2;
4051 if (hlen < sizeof(struct ip))
4052 goto bad;
4053 if (hlen > (*mp)->m_len) {
4054 if ((*mp = m_pullup(*mp, hlen)) == NULL)
4055 goto bad;
4056 ip = mtod(*mp, struct ip *);
4057 if (ip == NULL)
4058 goto bad;
4059 }
4060 ip->ip_sum = 0;
4061 if (hlen == sizeof(struct ip))
4062 ip->ip_sum = in_cksum_hdr(ip);
4063 else
4064 ip->ip_sum = in_cksum(*mp, hlen);
4065
4066 break;
4067 #endif /* INET */
4068 #ifdef INET6
4069 case ETHERTYPE_IPV6:
4070 if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
4071 pfil_mbuf_out(V_inet6_pfil_head, mp, bifp, NULL)) !=
4072 PFIL_PASS)
4073 break;
4074
4075 if (V_pfil_member && ifp != NULL) {
4076 rv = (dir == PFIL_OUT) ?
4077 pfil_mbuf_out(V_inet6_pfil_head, mp, ifp, NULL) :
4078 pfil_mbuf_in(V_inet6_pfil_head, mp, ifp, NULL);
4079 if (rv != PFIL_PASS)
4080 break;
4081 }
4082
4083 if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
4084 pfil_mbuf_in(V_inet6_pfil_head, mp, bifp, NULL)) !=
4085 PFIL_PASS)
4086 break;
4087 break;
4088 #endif
4089 }
4090
4091 switch (rv) {
4092 case PFIL_CONSUMED:
4093 return (0);
4094 case PFIL_DROPPED:
4095 return (EACCES);
4096 default:
4097 break;
4098 }
4099
4100 error = -1;
4101
4102 /*
4103 * Finally, put everything back the way it was and return
4104 */
4105 if (snap) {
4106 M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
4107 if (*mp == NULL)
4108 return (error);
4109 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
4110 }
4111
4112 M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
4113 if (*mp == NULL)
4114 return (error);
4115 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
4116
4117 return (0);
4118
4119 bad:
4120 m_freem(*mp);
4121 *mp = NULL;
4122 return (error);
4123 }
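
#if 0
/*
 * Illustrative sketch only (not compiled): how the input and forwarding
 * paths are expected to call bridge_pfil(), once inbound on the member
 * the frame arrived on and once outbound towards the destination member.
 * The helper name and the 'src_if'/'dst_if'/'m' locals are hypothetical.
 * On a non-zero return, or if the mbuf pointer has been cleared, the
 * packet was dropped or consumed and must not be touched again.
 */
static void
bridge_pfil_example(struct bridge_softc *sc, struct ifnet *src_if,
    struct ifnet *dst_if, struct mbuf *m)
{
	/* Inbound pass on the receiving member interface. */
	if (bridge_pfil(&m, sc->sc_ifp, src_if, PFIL_IN) != 0 || m == NULL)
		return;

	/* ... forwarding decision would happen here ... */

	/* Outbound pass towards the chosen destination member. */
	if (bridge_pfil(&m, sc->sc_ifp, dst_if, PFIL_OUT) != 0 || m == NULL)
		return;

	m_freem(m);	/* Sketch only: nothing further to do with it here. */
}
#endif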
4124
4125 #ifdef INET
4126 /*
4127 * Perform basic sanity checks on the IP header, since
4128 * pfil(9) assumes ip_input() has already done them.
4129 * Cut-and-pasted from ip_input.c.
4130 * Given how simple the IPv6 version is,
4131 * does the IPv4 version really need to be
4132 * this complicated?
4133 *
4134 * XXX Should we update ipstat here, or not?
4135 * XXX Right now we update ipstat but not
4136 * XXX csum_counter.
4137 */
4138 static int
4139 bridge_ip_checkbasic(struct mbuf **mp)
4140 {
4141 struct mbuf *m = *mp;
4142 struct ip *ip;
4143 int len, hlen;
4144 u_short sum;
4145
4146 if (*mp == NULL)
4147 return (-1);
4148
4149 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4150 if ((m = m_copyup(m, sizeof(struct ip),
4151 (max_linkhdr + 3) & ~3)) == NULL) {
4152 /* XXXJRT new stat, please */
4153 KMOD_IPSTAT_INC(ips_toosmall);
4154 goto bad;
4155 }
4156 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
4157 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
4158 KMOD_IPSTAT_INC(ips_toosmall);
4159 goto bad;
4160 }
4161 }
4162 ip = mtod(m, struct ip *);
4163 if (ip == NULL) goto bad;
4164
4165 if (ip->ip_v != IPVERSION) {
4166 KMOD_IPSTAT_INC(ips_badvers);
4167 goto bad;
4168 }
4169 hlen = ip->ip_hl << 2;
4170 if (hlen < sizeof(struct ip)) { /* minimum header length */
4171 KMOD_IPSTAT_INC(ips_badhlen);
4172 goto bad;
4173 }
4174 if (hlen > m->m_len) {
4175 if ((m = m_pullup(m, hlen)) == NULL) {
4176 KMOD_IPSTAT_INC(ips_badhlen);
4177 goto bad;
4178 }
4179 ip = mtod(m, struct ip *);
4180 if (ip == NULL) goto bad;
4181 }
4182
4183 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
4184 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
4185 } else {
4186 if (hlen == sizeof(struct ip)) {
4187 sum = in_cksum_hdr(ip);
4188 } else {
4189 sum = in_cksum(m, hlen);
4190 }
4191 }
4192 if (sum) {
4193 KMOD_IPSTAT_INC(ips_badsum);
4194 goto bad;
4195 }
4196
4197 /* Retrieve the packet length. */
4198 len = ntohs(ip->ip_len);
4199
4200 /*
4201 * Check for additional length bogosity
4202 */
4203 if (len < hlen) {
4204 KMOD_IPSTAT_INC(ips_badlen);
4205 goto bad;
4206 }
4207
4208 /*
4209 * Check that the amount of data in the buffers
4210 * is at least as much as the IP header would have us expect.
4211 * Drop packet if shorter than we expect.
4212 */
4213 if (m->m_pkthdr.len < len) {
4214 KMOD_IPSTAT_INC(ips_tooshort);
4215 goto bad;
4216 }
4217
4218 /* Checks out, proceed */
4219 *mp = m;
4220 return (0);
4221
4222 bad:
4223 *mp = m;
4224 return (-1);
4225 }
4226 #endif /* INET */
4227
4228 #ifdef INET6
4229 /*
4230 * Same as above, but for IPv6.
4231 * Cut-and-pasted from ip6_input.c.
4232 * XXX Should we update ip6stat, or not?
4233 */
4234 static int
4235 bridge_ip6_checkbasic(struct mbuf **mp)
4236 {
4237 struct mbuf *m = *mp;
4238 struct ip6_hdr *ip6;
4239
4240 /*
4241 * If the IPv6 header is not aligned, slurp it up into a new
4242 * mbuf with space for link headers, in the event we forward
4243 * it. Otherwise, if it is aligned, make sure the entire base
4244 * IPv6 header is in the first mbuf of the chain.
4245 */
4246 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4247 struct ifnet *inifp = m->m_pkthdr.rcvif;
4248 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
4249 (max_linkhdr + 3) & ~3)) == NULL) {
4250 /* XXXJRT new stat, please */
4251 IP6STAT_INC(ip6s_toosmall);
4252 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4253 goto bad;
4254 }
4255 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
4256 struct ifnet *inifp = m->m_pkthdr.rcvif;
4257 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
4258 IP6STAT_INC(ip6s_toosmall);
4259 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4260 goto bad;
4261 }
4262 }
4263
4264 ip6 = mtod(m, struct ip6_hdr *);
4265
4266 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
4267 IP6STAT_INC(ip6s_badvers);
4268 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
4269 goto bad;
4270 }
4271
4272 /* Checks out, proceed */
4273 *mp = m;
4274 return (0);
4275
4276 bad:
4277 *mp = m;
4278 return (-1);
4279 }
4280 #endif /* INET6 */
4281
4282 #ifdef INET
4283 /*
4284 * bridge_fragment:
4285 *
4286 * Fragment an mbuf chain into multiple packets and prepend the Ethernet header to each.
4287 */
4288 static int
4289 bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
4290 int snap, struct llc *llc)
4291 {
4292 struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
4293 struct ip *ip;
4294 int error = -1;
4295
4296 if (m->m_len < sizeof(struct ip) &&
4297 (m = m_pullup(m, sizeof(struct ip))) == NULL)
4298 goto dropit;
4299 ip = mtod(m, struct ip *);
4300
4301 m->m_pkthdr.csum_flags |= CSUM_IP;
4302 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
4303 if (error)
4304 goto dropit;
4305
4306 /*
4307 * Walk the chain and re-add the Ethernet header for
4308 * each mbuf packet.
4309 */
4310 for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
4311 nextpkt = mcur->m_nextpkt;
4312 mcur->m_nextpkt = NULL;
4313 if (snap) {
4314 M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
4315 if (mcur == NULL) {
4316 error = ENOBUFS;
4317 if (mprev != NULL)
4318 mprev->m_nextpkt = nextpkt;
4319 goto dropit;
4320 }
4321 bcopy(llc, mtod(mcur, caddr_t), sizeof(struct llc));
4322 }
4323
4324 M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
4325 if (mcur == NULL) {
4326 error = ENOBUFS;
4327 if (mprev != NULL)
4328 mprev->m_nextpkt = nextpkt;
4329 goto dropit;
4330 }
4331 bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);
4332
4333 /*
4334 * The previous two M_PREPEND could have inserted one or two
4335 * mbufs in front so we have to update the previous packet's
4336 * m_nextpkt.
4337 */
4338 mcur->m_nextpkt = nextpkt;
4339 if (mprev != NULL)
4340 mprev->m_nextpkt = mcur;
4341 else {
4342 /* The first mbuf in the original chain needs to be
4343 * updated. */
4344 *mp = mcur;
4345 }
4346 mprev = mcur;
4347 }
4348
4349 KMOD_IPSTAT_INC(ips_fragmented);
4350 return (error);
4351
4352 dropit:
4353 for (mcur = *mp; mcur; mcur = m) { /* dropping the full packet chain */
4354 m = mcur->m_nextpkt;
4355 m_freem(mcur);
4356 }
4357 return (error);
4358 }
4359 #endif /* INET */
4360
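/*
 * bridge_linkstate:
 *
 *	Handle a link-state change on a member interface: re-evaluate the
 *	bridge's own link state and notify the STP code.
 */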
4361 static void
4362 bridge_linkstate(struct ifnet *ifp)
4363 {
4364 struct bridge_softc *sc = NULL;
4365 struct bridge_iflist *bif;
4366 struct epoch_tracker et;
4367
4368 NET_EPOCH_ENTER(et);
4369
4370 bif = ifp->if_bridge;
4371 if (bif)
4372 sc = bif->bif_sc;
4373
4374 if (sc != NULL) {
4375 bridge_linkcheck(sc);
4376 bstp_linkstate(&bif->bif_stp);
4377 }
4378
4379 NET_EPOCH_EXIT(et);
4380 }
4381
4382 static void
4383 bridge_linkcheck(struct bridge_softc *sc)
4384 {
4385 struct bridge_iflist *bif;
4386 int new_link, hasls;
4387
4388 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
4389
4390 new_link = LINK_STATE_DOWN;
4391 hasls = 0;
4392 /* Our link is considered up if at least one of our ports is active */
4393 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
4394 if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
4395 hasls++;
4396 if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
4397 new_link = LINK_STATE_UP;
4398 break;
4399 }
4400 }
4401 if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
4402 /* If no interfaces support link-state then we default to up */
4403 new_link = LINK_STATE_UP;
4404 }
4405 if_link_state_change(sc->sc_ifp, new_link);
4406 }
4407