1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-4-Clause
5 *
6 * Copyright 2001 Wasabi Systems, Inc.
7 * All rights reserved.
8 *
9 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed for the NetBSD Project by
22 * Wasabi Systems, Inc.
23 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
24 * or promote products derived from this software without specific prior
25 * written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
55 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
56 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
61 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
62 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 *
65 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
66 */
67
68 /*
69 * Network interface bridge support.
70 *
71 * TODO:
72 *
73 * - Currently only supports Ethernet-like interfaces (Ethernet,
74 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
75 * to bridge other types of interfaces (maybe consider
76 * heterogeneous bridges).
77 */
78
79 #include "opt_inet.h"
80 #include "opt_inet6.h"
81
82 #define EXTERR_CATEGORY EXTERR_CAT_BRIDGE
83
84 #include <sys/param.h>
85 #include <sys/ctype.h> /* string functions */
86 #include <sys/eventhandler.h>
87 #include <sys/exterrvar.h>
88 #include <sys/jail.h>
89 #include <sys/kernel.h>
90 #include <sys/lock.h>
91 #include <sys/malloc.h>
92 #include <sys/mbuf.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/priv.h>
96 #include <sys/proc.h>
97 #include <sys/protosw.h>
98 #include <sys/random.h>
99 #include <sys/systm.h>
100 #include <sys/socket.h> /* for net/if.h */
101 #include <sys/sockio.h>
102 #include <sys/syslog.h>
103 #include <sys/sysctl.h>
104 #include <sys/time.h>
105
106 #include <vm/uma.h>
107
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_clone.h>
111 #include <net/if_dl.h>
112 #include <net/if_types.h>
113 #include <net/if_var.h>
114 #include <net/if_private.h>
115 #include <net/pfil.h>
116 #include <net/vnet.h>
117
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/in_var.h>
121 #include <netinet/ip.h>
122 #include <netinet/ip_var.h>
123 #ifdef INET6
124 #include <netinet/ip6.h>
125 #include <netinet6/ip6_var.h>
126 #include <netinet6/in6_ifattach.h>
127 #endif
128 #if defined(INET) || defined(INET6)
129 #include <netinet/ip_carp.h>
130 #endif
131 #include <machine/in_cksum.h>
132 #include <netinet/if_ether.h>
133 #include <net/bridgestp.h>
134 #include <net/if_bridgevar.h>
135 #include <net/if_llc.h>
136 #include <net/if_vlan_var.h>
137
138 #include <net/route.h>
139
140 /*
141 * At various points in the code we need to know if we're hooked into the INET
142 * and/or INET6 pfil. Define some macros to do that based on which IP versions
143 * are enabled in the kernel. This avoids littering the rest of the code with
144  * #ifdef INET6 checks around every reference to V_inet6_pfil_head.
145 */
146 #ifdef INET6
147 #define PFIL_HOOKED_IN_INET6 PFIL_HOOKED_IN(V_inet6_pfil_head)
148 #define PFIL_HOOKED_OUT_INET6 PFIL_HOOKED_OUT(V_inet6_pfil_head)
149 #else
150 #define PFIL_HOOKED_IN_INET6 false
151 #define PFIL_HOOKED_OUT_INET6 false
152 #endif
153
154 #ifdef INET
155 #define PFIL_HOOKED_IN_INET PFIL_HOOKED_IN(V_inet_pfil_head)
156 #define PFIL_HOOKED_OUT_INET PFIL_HOOKED_OUT(V_inet_pfil_head)
157 #else
158 #define PFIL_HOOKED_IN_INET false
159 #define PFIL_HOOKED_OUT_INET false
160 #endif
161
162 #define PFIL_HOOKED_IN_46 (PFIL_HOOKED_IN_INET6 || PFIL_HOOKED_IN_INET)
163 #define PFIL_HOOKED_OUT_46 (PFIL_HOOKED_OUT_INET6 || PFIL_HOOKED_OUT_INET)
164
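/*
 * Usage sketch (illustrative, not a verbatim excerpt from the code below):
 * these macros let packet-path code test for active pfil(9) hooks without
 * wrapping each call site in #ifdef INET / #ifdef INET6.  Assuming local
 * variables "m", "sc" and "dst_ifp" as used elsewhere in this file:
 *
 *	if (PFIL_HOOKED_OUT_46 &&
 *	    bridge_pfil(&m, sc->sc_ifp, dst_ifp, PFIL_OUT) != 0)
 *		return;
 */
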
165 /*
166 * Size of the route hash table. Must be a power of two.
167 */
168 #ifndef BRIDGE_RTHASH_SIZE
169 #define BRIDGE_RTHASH_SIZE 1024
170 #endif
171
172 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
173
174 /*
175 * Default maximum number of addresses to cache.
176 */
177 #ifndef BRIDGE_RTABLE_MAX
178 #define BRIDGE_RTABLE_MAX 2000
179 #endif
180
181 /*
182 * Timeout (in seconds) for entries learned dynamically.
183 */
184 #ifndef BRIDGE_RTABLE_TIMEOUT
185 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
186 #endif
187
188 /*
189 * Number of seconds between walks of the route list.
190 */
191 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
192 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
193 #endif
194
195 /*
196 * List of capabilities to possibly mask on the member interface.
197 */
198 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
199 IFCAP_TXCSUM_IPV6|IFCAP_MEXTPG)
200
201 /*
202 * List of capabilities to strip
203 */
204 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO
205
206 /*
207 * Bridge locking
208 *
209 * The bridge relies heavily on the epoch(9) system to protect its data
210 * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
211 * must ensure there is only one writer at a time.
212 *
213 * That is: for read accesses we only need to be in NET_EPOCH, but for write
214 * accesses we must hold:
215 *
216 * - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
217 * - BRIDGE_LOCK, for any other change
218 *
219 * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
220 * calls to bridge member interfaces and these ioctl()s can sleep.
221 * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
222 * required while we're in NET_EPOCH and then we're not allowed to sleep.
223 */
224 #define BRIDGE_LOCK_INIT(_sc) do { \
225 sx_init(&(_sc)->sc_sx, "if_bridge"); \
226 mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF); \
227 } while (0)
228 #define BRIDGE_LOCK_DESTROY(_sc) do { \
229 sx_destroy(&(_sc)->sc_sx); \
230 mtx_destroy(&(_sc)->sc_rt_mtx); \
231 } while (0)
232 #define BRIDGE_LOCK(_sc) sx_xlock(&(_sc)->sc_sx)
233 #define BRIDGE_UNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx)
234 #define BRIDGE_LOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
235 #define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
236 MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
237 #define BRIDGE_UNLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
238 #define BRIDGE_RT_LOCK(_sc) mtx_lock(&(_sc)->sc_rt_mtx)
239 #define BRIDGE_RT_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_rt_mtx)
240 #define BRIDGE_RT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
241 #define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
242 MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
243
244 struct bridge_softc;
245
246 /*
247 * Bridge interface list entry.
248 */
249 struct bridge_iflist {
250 CK_LIST_ENTRY(bridge_iflist) bif_next;
251 struct ifnet *bif_ifp; /* member if */
252 struct bridge_softc *bif_sc; /* parent bridge */
253 struct bstp_port bif_stp; /* STP state */
254 uint32_t bif_flags; /* member if flags */
255 int bif_savedcaps; /* saved capabilities */
256 uint32_t bif_addrmax; /* max # of addresses */
257 uint32_t bif_addrcnt; /* cur. # of addresses */
258 uint32_t bif_addrexceeded;/* # of address violations */
259 struct epoch_context bif_epoch_ctx;
260 ether_vlanid_t bif_pvid; /* port vlan id */
261 ifbvlan_set_t bif_vlan_set; /* if allowed tagged vlans */
262 uint16_t bif_vlanproto; /* vlan protocol */
263 };
264
265 /*
266 * Bridge route node.
267 */
268 struct bridge_rtnode {
269 CK_LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
270 CK_LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
271 struct bridge_iflist *brt_dst; /* destination if */
272 unsigned long brt_expire; /* expiration time */
273 uint8_t brt_flags; /* address flags */
274 uint8_t brt_addr[ETHER_ADDR_LEN];
275 ether_vlanid_t brt_vlan; /* vlan id */
276 struct vnet *brt_vnet;
277 struct epoch_context brt_epoch_ctx;
278 };
279 #define brt_ifp brt_dst->bif_ifp
280
281 /*
282 * Software state for each bridge.
283 */
284 struct bridge_softc {
285 struct ifnet *sc_ifp; /* make this an interface */
286 LIST_ENTRY(bridge_softc) sc_list;
287 struct sx sc_sx;
288 struct mtx sc_rt_mtx;
289 uint32_t sc_brtmax; /* max # of addresses */
290 uint32_t sc_brtcnt; /* cur. # of addresses */
291 uint32_t sc_brttimeout; /* rt timeout in seconds */
292 struct callout sc_brcallout; /* bridge callout */
293 CK_LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
294 CK_LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */
295 CK_LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */
296 uint32_t sc_rthash_key; /* key for hash */
297 CK_LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */
298 struct bstp_state sc_stp; /* STP state */
299 uint32_t sc_brtexceeded; /* # of cache drops */
300 struct ifnet *sc_ifaddr; /* member mac copied from */
301 struct ether_addr sc_defaddr; /* Default MAC address */
302 if_input_fn_t sc_if_input; /* Saved copy of if_input */
303 struct epoch_context sc_epoch_ctx;
304 ifbr_flags_t sc_flags; /* bridge flags */
305 ether_vlanid_t sc_defpvid; /* default PVID */
306 };
307
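/*
 * A minimal sketch of the locking rules described above (illustrative only,
 * kept out of the build): readers may traverse sc_iflist inside the net
 * epoch, while writers must hold BRIDGE_LOCK.  The helpers below are
 * hypothetical and exist purely for illustration.
 */
#if 0
static void
bridge_example_reader(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);		/* read side: epoch section only */
	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
		(void)bif->bif_ifp;	/* safe to inspect members here */
	NET_EPOCH_EXIT(et);
}

static void
bridge_example_writer(struct bridge_softc *sc, struct bridge_iflist *bif)
{
	BRIDGE_LOCK(sc);		/* write side: exclusive sx lock */
	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
	BRIDGE_UNLOCK(sc);
}
#endif
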
308 VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
309 #define V_bridge_list_sx VNET(bridge_list_sx)
310 static eventhandler_tag bridge_detach_cookie;
311
312 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
313
314 VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
315 #define V_bridge_rtnode_zone VNET(bridge_rtnode_zone)
316
317 static int bridge_clone_create(struct if_clone *, char *, size_t,
318 struct ifc_data *, struct ifnet **);
319 static int bridge_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
320
321 static int bridge_ioctl(struct ifnet *, u_long, caddr_t);
322 static void bridge_mutecaps(struct bridge_softc *);
323 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
324 int);
325 static void bridge_ifdetach(void *arg __unused, struct ifnet *);
326 static void bridge_init(void *);
327 static void bridge_dummynet(struct mbuf *, struct ifnet *);
328 static bool bridge_same(const void *, const void *);
329 static void *bridge_get_softc(struct ifnet *);
330 static void bridge_stop(struct ifnet *, int);
331 static int bridge_transmit(struct ifnet *, struct mbuf *);
332 #ifdef ALTQ
333 static void bridge_altq_start(if_t);
334 static int bridge_altq_transmit(if_t, struct mbuf *);
335 #endif
336 static void bridge_qflush(struct ifnet *);
337 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
338 static void bridge_inject(struct ifnet *, struct mbuf *);
339 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
340 struct rtentry *);
341 static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
342 struct mbuf *, struct bridge_iflist *);
343 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
344
345 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
346 struct mbuf *m);
347 static bool bridge_member_ifaddrs(void);
348 static void bridge_timer(void *);
349
350 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
351 struct mbuf *, int);
352 static void bridge_span(struct bridge_softc *, struct mbuf *);
353
354 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
355 ether_vlanid_t, struct bridge_iflist *, int, uint8_t);
356 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
357 ether_vlanid_t);
358 static void bridge_rttrim(struct bridge_softc *);
359 static void bridge_rtage(struct bridge_softc *);
360 static void bridge_rtflush(struct bridge_softc *, int);
361 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
362 ether_vlanid_t);
363 static bool bridge_vfilter_in(const struct bridge_iflist *, struct mbuf *);
364 static bool bridge_vfilter_out(const struct bridge_iflist *,
365 const struct mbuf *);
366
367 static void bridge_rtable_init(struct bridge_softc *);
368 static void bridge_rtable_fini(struct bridge_softc *);
369
370 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
371 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
372 const uint8_t *, ether_vlanid_t);
373 static int bridge_rtnode_insert(struct bridge_softc *,
374 struct bridge_rtnode *);
375 static void bridge_rtnode_destroy(struct bridge_softc *,
376 struct bridge_rtnode *);
377 static void bridge_rtable_expire(struct ifnet *, int);
378 static void bridge_state_change(struct ifnet *, int);
379
380 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
381 const char *name);
382 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
383 struct ifnet *ifp);
384 static void bridge_delete_member(struct bridge_softc *,
385 struct bridge_iflist *, int);
386 static void bridge_delete_span(struct bridge_softc *,
387 struct bridge_iflist *);
388
389 static int bridge_ioctl_add(struct bridge_softc *, void *);
390 static int bridge_ioctl_del(struct bridge_softc *, void *);
391 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
392 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
393 static int bridge_ioctl_scache(struct bridge_softc *, void *);
394 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
395 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
396 static int bridge_ioctl_rts(struct bridge_softc *, void *);
397 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
398 static int bridge_ioctl_sto(struct bridge_softc *, void *);
399 static int bridge_ioctl_gto(struct bridge_softc *, void *);
400 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
401 static int bridge_ioctl_flush(struct bridge_softc *, void *);
402 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
403 static int bridge_ioctl_spri(struct bridge_softc *, void *);
404 static int bridge_ioctl_ght(struct bridge_softc *, void *);
405 static int bridge_ioctl_sht(struct bridge_softc *, void *);
406 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
407 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
408 static int bridge_ioctl_gma(struct bridge_softc *, void *);
409 static int bridge_ioctl_sma(struct bridge_softc *, void *);
410 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
411 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
412 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
413 static int bridge_ioctl_sifpvid(struct bridge_softc *, void *);
414 static int bridge_ioctl_sifvlanset(struct bridge_softc *, void *);
415 static int bridge_ioctl_gifvlanset(struct bridge_softc *, void *);
416 static int bridge_ioctl_addspan(struct bridge_softc *, void *);
417 static int bridge_ioctl_delspan(struct bridge_softc *, void *);
418 static int bridge_ioctl_gbparam(struct bridge_softc *, void *);
419 static int bridge_ioctl_grte(struct bridge_softc *, void *);
420 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *);
421 static int bridge_ioctl_sproto(struct bridge_softc *, void *);
422 static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
423 static int bridge_ioctl_gflags(struct bridge_softc *, void *);
424 static int bridge_ioctl_sflags(struct bridge_softc *, void *);
425 static int bridge_ioctl_gdefpvid(struct bridge_softc *, void *);
426 static int bridge_ioctl_sdefpvid(struct bridge_softc *, void *);
427 static int bridge_ioctl_svlanproto(struct bridge_softc *, void *);
428 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
429 int);
430 #ifdef INET
431 static int bridge_ip_checkbasic(struct mbuf **mp);
432 static int bridge_fragment(struct ifnet *, struct mbuf **mp,
433 struct ether_header *, int, struct llc *);
434 #endif /* INET */
435 #ifdef INET6
436 static int bridge_ip6_checkbasic(struct mbuf **mp);
437 #endif /* INET6 */
438 static void bridge_linkstate(struct ifnet *ifp);
439 static void bridge_linkcheck(struct bridge_softc *sc);
440
441 /*
442 * Use the "null" value from IEEE 802.1Q-2014 Table 9-2
443 * to indicate untagged frames.
444 */
445 #define VLANTAGOF(_m) \
446 ((_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : DOT1Q_VID_NULL)
447
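/*
 * Usage sketch (illustrative): the learning and forwarding paths obtain the
 * VLAN ID of an mbuf "m" with
 *
 *	ether_vlanid_t vlan = VLANTAGOF(m);
 *
 * which evaluates to DOT1Q_VID_NULL for untagged frames.
 */
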
448 static struct bstp_cb_ops bridge_ops = {
449 .bcb_state = bridge_state_change,
450 .bcb_rtage = bridge_rtable_expire
451 };
452
453 SYSCTL_DECL(_net_link);
454 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
455 "Bridge");
456
457 /* only pass IP[46] packets when pfil is enabled */
458 VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
459 #define V_pfil_onlyip VNET(pfil_onlyip)
460 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
461 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
462 "Only pass IP packets when pfil is enabled");
463
464 /* run pfil hooks on the bridge interface */
465 VNET_DEFINE_STATIC(int, pfil_bridge) = 0;
466 #define V_pfil_bridge VNET(pfil_bridge)
467 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
468 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
469 "Packet filter on the bridge interface");
470
471 /* layer2 filter with ipfw */
472 VNET_DEFINE_STATIC(int, pfil_ipfw);
473 #define V_pfil_ipfw VNET(pfil_ipfw)
474
475 /* layer2 ARP filter with ipfw */
476 VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
477 #define V_pfil_ipfw_arp VNET(pfil_ipfw_arp)
478 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
479 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
480 "Filter ARP packets through IPFW layer2");
481
482 /* run pfil hooks on the member interface */
483 VNET_DEFINE_STATIC(int, pfil_member) = 0;
484 #define V_pfil_member VNET(pfil_member)
485 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
486 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
487 "Packet filter on the member interface");
488
489 /* run pfil hooks on the physical interface for locally destined packets */
490 VNET_DEFINE_STATIC(int, pfil_local_phys);
491 #define V_pfil_local_phys VNET(pfil_local_phys)
492 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
493 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
494 "Packet filter on the physical interface for locally destined packets");
495
496 /* log STP state changes */
497 VNET_DEFINE_STATIC(int, log_stp);
498 #define V_log_stp VNET(log_stp)
499 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
500 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
501 "Log STP state changes");
502
503 /* share MAC with first bridge member */
504 VNET_DEFINE_STATIC(int, bridge_inherit_mac);
505 #define V_bridge_inherit_mac VNET(bridge_inherit_mac)
506 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
507 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
508 "Inherit MAC address from the first bridge member");
509
510 VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
511 #define V_allow_llz_overlap VNET(allow_llz_overlap)
512 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
513 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
514 "Allow overlap of link-local scope "
515 "zones of a bridge interface and the member interfaces");
516
517 /* log MAC address port flapping */
518 VNET_DEFINE_STATIC(bool, log_mac_flap) = true;
519 #define V_log_mac_flap VNET(log_mac_flap)
520 SYSCTL_BOOL(_net_link_bridge, OID_AUTO, log_mac_flap,
521 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(log_mac_flap), true,
522 "Log MAC address port flapping");
523
524 /* allow IP addresses on bridge members */
525 VNET_DEFINE_STATIC(bool, member_ifaddrs) = true;
526 #define V_member_ifaddrs VNET(member_ifaddrs)
527 SYSCTL_BOOL(_net_link_bridge, OID_AUTO, member_ifaddrs,
528 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(member_ifaddrs), false,
529 "Allow layer 3 addresses on bridge members (deprecated)");
530
531 static bool
532 bridge_member_ifaddrs(void)
533 {
534 return (V_member_ifaddrs);
535 }
536
537 VNET_DEFINE_STATIC(int, log_interval) = 5;
538 VNET_DEFINE_STATIC(int, log_count) = 0;
539 VNET_DEFINE_STATIC(struct timeval, log_last) = { 0 };
540
541 #define V_log_interval VNET(log_interval)
542 #define V_log_count VNET(log_count)
543 #define V_log_last VNET(log_last)
544
545 struct bridge_control {
546 int (*bc_func)(struct bridge_softc *, void *);
547 int bc_argsize;
548 int bc_flags;
549 };
550
551 #define BC_F_COPYIN 0x01 /* copy arguments in */
552 #define BC_F_COPYOUT 0x02 /* copy arguments out */
553 #define BC_F_SUSER 0x04 /* do super-user check */
554
555 static const struct bridge_control bridge_control_table[] = {
556 { bridge_ioctl_add, sizeof(struct ifbreq),
557 BC_F_COPYIN|BC_F_SUSER },
558 { bridge_ioctl_del, sizeof(struct ifbreq),
559 BC_F_COPYIN|BC_F_SUSER },
560
561 { bridge_ioctl_gifflags, sizeof(struct ifbreq),
562 BC_F_COPYIN|BC_F_COPYOUT },
563 { bridge_ioctl_sifflags, sizeof(struct ifbreq),
564 BC_F_COPYIN|BC_F_SUSER },
565
566 { bridge_ioctl_scache, sizeof(struct ifbrparam),
567 BC_F_COPYIN|BC_F_SUSER },
568 { bridge_ioctl_gcache, sizeof(struct ifbrparam),
569 BC_F_COPYOUT },
570
571 { bridge_ioctl_gifs, sizeof(struct ifbifconf),
572 BC_F_COPYIN|BC_F_COPYOUT },
573 { bridge_ioctl_rts, sizeof(struct ifbaconf),
574 BC_F_COPYIN|BC_F_COPYOUT },
575
576 { bridge_ioctl_saddr, sizeof(struct ifbareq),
577 BC_F_COPYIN|BC_F_SUSER },
578
579 { bridge_ioctl_sto, sizeof(struct ifbrparam),
580 BC_F_COPYIN|BC_F_SUSER },
581 { bridge_ioctl_gto, sizeof(struct ifbrparam),
582 BC_F_COPYOUT },
583
584 { bridge_ioctl_daddr, sizeof(struct ifbareq),
585 BC_F_COPYIN|BC_F_SUSER },
586
587 { bridge_ioctl_flush, sizeof(struct ifbreq),
588 BC_F_COPYIN|BC_F_SUSER },
589
590 { bridge_ioctl_gpri, sizeof(struct ifbrparam),
591 BC_F_COPYOUT },
592 { bridge_ioctl_spri, sizeof(struct ifbrparam),
593 BC_F_COPYIN|BC_F_SUSER },
594
595 { bridge_ioctl_ght, sizeof(struct ifbrparam),
596 BC_F_COPYOUT },
597 { bridge_ioctl_sht, sizeof(struct ifbrparam),
598 BC_F_COPYIN|BC_F_SUSER },
599
600 { bridge_ioctl_gfd, sizeof(struct ifbrparam),
601 BC_F_COPYOUT },
602 { bridge_ioctl_sfd, sizeof(struct ifbrparam),
603 BC_F_COPYIN|BC_F_SUSER },
604
605 { bridge_ioctl_gma, sizeof(struct ifbrparam),
606 BC_F_COPYOUT },
607 { bridge_ioctl_sma, sizeof(struct ifbrparam),
608 BC_F_COPYIN|BC_F_SUSER },
609
610 { bridge_ioctl_sifprio, sizeof(struct ifbreq),
611 BC_F_COPYIN|BC_F_SUSER },
612
613 { bridge_ioctl_sifcost, sizeof(struct ifbreq),
614 BC_F_COPYIN|BC_F_SUSER },
615
616 { bridge_ioctl_addspan, sizeof(struct ifbreq),
617 BC_F_COPYIN|BC_F_SUSER },
618 { bridge_ioctl_delspan, sizeof(struct ifbreq),
619 BC_F_COPYIN|BC_F_SUSER },
620
621 { bridge_ioctl_gbparam, sizeof(struct ifbropreq),
622 BC_F_COPYOUT },
623
624 { bridge_ioctl_grte, sizeof(struct ifbrparam),
625 BC_F_COPYOUT },
626
627 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf),
628 BC_F_COPYIN|BC_F_COPYOUT },
629
630 { bridge_ioctl_sproto, sizeof(struct ifbrparam),
631 BC_F_COPYIN|BC_F_SUSER },
632
633 { bridge_ioctl_stxhc, sizeof(struct ifbrparam),
634 BC_F_COPYIN|BC_F_SUSER },
635
636 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
637 BC_F_COPYIN|BC_F_SUSER },
638
639 { bridge_ioctl_sifpvid, sizeof(struct ifbreq),
640 BC_F_COPYIN|BC_F_SUSER },
641
642 { bridge_ioctl_sifvlanset, sizeof(struct ifbif_vlan_req),
643 BC_F_COPYIN|BC_F_SUSER },
644
645 { bridge_ioctl_gifvlanset, sizeof(struct ifbif_vlan_req),
646 BC_F_COPYIN|BC_F_COPYOUT },
647
648 { bridge_ioctl_gflags, sizeof(struct ifbrparam),
649 BC_F_COPYOUT },
650
651 { bridge_ioctl_sflags, sizeof(struct ifbrparam),
652 BC_F_COPYIN|BC_F_SUSER },
653
654 { bridge_ioctl_gdefpvid, sizeof(struct ifbrparam),
655 BC_F_COPYOUT },
656
657 { bridge_ioctl_sdefpvid, sizeof(struct ifbrparam),
658 BC_F_COPYIN|BC_F_SUSER },
659
660 { bridge_ioctl_svlanproto, sizeof(struct ifbreq),
661 BC_F_COPYIN|BC_F_SUSER },
662 };
663 static const int bridge_control_table_size = nitems(bridge_control_table);
664
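/*
 * Illustrative userland sketch (not part of this driver): bridge commands
 * arrive at bridge_ioctl() as SIOCSDRVSPEC/SIOCGDRVSPEC requests whose
 * ifd_cmd indexes bridge_control_table.  Assuming a socket "sock" and the
 * example interface names "bridge0"/"em0", adding a member (BRDGADD,
 * table slot 0, handled by bridge_ioctl_add) looks roughly like:
 *
 *	struct ifbreq req = { 0 };
 *	struct ifdrv ifd = { 0 };
 *
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	if (ioctl(sock, SIOCSDRVSPEC, &ifd) == -1)
 *		err(1, "BRDGADD");
 */
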
665 VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list) =
666 LIST_HEAD_INITIALIZER();
667 #define V_bridge_list VNET(bridge_list)
668 #define BRIDGE_LIST_LOCK_INIT(x) sx_init(&V_bridge_list_sx, \
669 "if_bridge list")
670 #define BRIDGE_LIST_LOCK_DESTROY(x) sx_destroy(&V_bridge_list_sx)
671 #define BRIDGE_LIST_LOCK(x) sx_xlock(&V_bridge_list_sx)
672 #define BRIDGE_LIST_UNLOCK(x) sx_xunlock(&V_bridge_list_sx)
673
674 VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
675 #define V_bridge_cloner VNET(bridge_cloner)
676
677 static const char bridge_name[] = "bridge";
678
679 static void
680 vnet_bridge_init(const void *unused __unused)
681 {
682
683 V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
684 sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
685 UMA_ALIGN_PTR, 0);
686 BRIDGE_LIST_LOCK_INIT();
687
688 struct if_clone_addreq req = {
689 .create_f = bridge_clone_create,
690 .destroy_f = bridge_clone_destroy,
691 .flags = IFC_F_AUTOUNIT,
692 };
693 V_bridge_cloner = ifc_attach_cloner(bridge_name, &req);
694 }
695 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
696 vnet_bridge_init, NULL);
697
698 static void
699 vnet_bridge_uninit(const void *unused __unused)
700 {
701
702 ifc_detach_cloner(V_bridge_cloner);
703 V_bridge_cloner = NULL;
704 BRIDGE_LIST_LOCK_DESTROY();
705
706 /* Callbacks may use the UMA zone. */
707 NET_EPOCH_DRAIN_CALLBACKS();
708
709 uma_zdestroy(V_bridge_rtnode_zone);
710 }
711 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
712 vnet_bridge_uninit, NULL);
713
714 static int
715 bridge_modevent(module_t mod, int type, void *data)
716 {
717
718 switch (type) {
719 case MOD_LOAD:
720 bridge_dn_p = bridge_dummynet;
721 bridge_same_p = bridge_same;
722 bridge_get_softc_p = bridge_get_softc;
723 bridge_member_ifaddrs_p = bridge_member_ifaddrs;
724 bridge_detach_cookie = EVENTHANDLER_REGISTER(
725 ifnet_departure_event, bridge_ifdetach, NULL,
726 EVENTHANDLER_PRI_ANY);
727 break;
728 case MOD_UNLOAD:
729 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
730 bridge_detach_cookie);
731 bridge_dn_p = NULL;
732 bridge_same_p = NULL;
733 bridge_get_softc_p = NULL;
734 bridge_member_ifaddrs_p = NULL;
735 break;
736 default:
737 return (EOPNOTSUPP);
738 }
739 return (0);
740 }
741
742 static moduledata_t bridge_mod = {
743 "if_bridge",
744 bridge_modevent,
745 0
746 };
747
748 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
749 MODULE_VERSION(if_bridge, 1);
750 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
751
752 /*
753 * handler for net.link.bridge.ipfw
754 */
755 static int
756 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
757 {
758 int enable = V_pfil_ipfw;
759 int error;
760
761 error = sysctl_handle_int(oidp, &enable, 0, req);
762 enable &= 1;
763
764 if (enable != V_pfil_ipfw) {
765 V_pfil_ipfw = enable;
766
767 /*
768 * Disable pfil so that ipfw doesn't run twice; if the user
769 * really wants both, they can re-enable pfil_bridge and/or
770 * pfil_member. Also allow non-IP packets, as ipfw can filter by
771 * layer 2 type.
772 */
773 if (V_pfil_ipfw) {
774 V_pfil_onlyip = 0;
775 V_pfil_bridge = 0;
776 V_pfil_member = 0;
777 }
778 }
779
780 return (error);
781 }
782 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
783 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
784 &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
785 "Layer2 filter with IPFW");
786
787 #ifdef VIMAGE
788 static void
789 bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
790 {
791 struct bridge_softc *sc = ifp->if_softc;
792 struct bridge_iflist *bif;
793
794 BRIDGE_LOCK(sc);
795
796 while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
797 bridge_delete_member(sc, bif, 0);
798
799 while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
800 bridge_delete_span(sc, bif);
801 }
802
803 BRIDGE_UNLOCK(sc);
804
805 ether_reassign(ifp, newvnet, arg);
806 }
807 #endif
808
809 /*
810 * bridge_get_softc:
811 *
812 * Return the bridge softc for an ifnet.
813 */
814 static void *
815 bridge_get_softc(struct ifnet *ifp)
816 {
817 struct bridge_iflist *bif;
818
819 NET_EPOCH_ASSERT();
820
821 bif = ifp->if_bridge;
822 if (bif == NULL)
823 return (NULL);
824 return (bif->bif_sc);
825 }
826
827 /*
828 * bridge_same:
829 *
830 * Return true if two interfaces are in the same bridge. This is only used by
831 * bridgestp via bridge_same_p.
832 */
833 static bool
834 bridge_same(const void *bifap, const void *bifbp)
835 {
836 const struct bridge_iflist *bifa = bifap, *bifb = bifbp;
837
838 NET_EPOCH_ASSERT();
839
840 if (bifa == NULL || bifb == NULL)
841 return (false);
842
843 return (bifa->bif_sc == bifb->bif_sc);
844 }
845
846 /*
847 * bridge_clone_create:
848 *
849 * Create a new bridge instance.
850 */
851 static int
852 bridge_clone_create(struct if_clone *ifc, char *name, size_t len,
853 struct ifc_data *ifd, struct ifnet **ifpp)
854 {
855 struct bridge_softc *sc;
856 struct ifnet *ifp;
857
858 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
859 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
860
861 BRIDGE_LOCK_INIT(sc);
862 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
863 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
864
865 /* Initialize our routing table. */
866 bridge_rtable_init(sc);
867
868 callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
869
870 CK_LIST_INIT(&sc->sc_iflist);
871 CK_LIST_INIT(&sc->sc_spanlist);
872
873 ifp->if_softc = sc;
874 if_initname(ifp, bridge_name, ifd->unit);
875 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
876 ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_HWTAGGING;
877 ifp->if_ioctl = bridge_ioctl;
878 #ifdef ALTQ
879 ifp->if_start = bridge_altq_start;
880 ifp->if_transmit = bridge_altq_transmit;
881 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
882 ifp->if_snd.ifq_drv_maxlen = 0;
883 IFQ_SET_READY(&ifp->if_snd);
884 #else
885 ifp->if_transmit = bridge_transmit;
886 #endif
887 ifp->if_qflush = bridge_qflush;
888 ifp->if_init = bridge_init;
889 ifp->if_type = IFT_BRIDGE;
890
891 ether_gen_addr(ifp, &sc->sc_defaddr);
892
893 bstp_attach(&sc->sc_stp, &bridge_ops);
894 ether_ifattach(ifp, sc->sc_defaddr.octet);
895 /* Now undo some of the damage... */
896 ifp->if_baudrate = 0;
897 #ifdef VIMAGE
898 ifp->if_reassign = bridge_reassign;
899 #endif
900 sc->sc_if_input = ifp->if_input; /* ether_input */
901 ifp->if_input = bridge_inject;
902
903 /*
904 * Allow BRIDGE_INPUT() to pass in packets originating from the bridge
905 * itself via bridge_inject(). This is required for netmap but
906 * otherwise has no effect.
907 */
908 ifp->if_bridge_input = bridge_input;
909
910 BRIDGE_LIST_LOCK();
911 LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
912 BRIDGE_LIST_UNLOCK();
913 *ifpp = ifp;
914
915 return (0);
916 }
917
918 static void
919 bridge_clone_destroy_cb(struct epoch_context *ctx)
920 {
921 struct bridge_softc *sc;
922
923 sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
924
925 BRIDGE_LOCK_DESTROY(sc);
926 free(sc, M_DEVBUF);
927 }
928
929 /*
930 * bridge_clone_destroy:
931 *
932 * Destroy a bridge instance.
933 */
934 static int
935 bridge_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
936 {
937 struct bridge_softc *sc = ifp->if_softc;
938 struct bridge_iflist *bif;
939 struct epoch_tracker et;
940
941 BRIDGE_LOCK(sc);
942
943 bridge_stop(ifp, 1);
944 ifp->if_flags &= ~IFF_UP;
945
946 while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
947 bridge_delete_member(sc, bif, 0);
948
949 while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
950 bridge_delete_span(sc, bif);
951 }
952
953 /* Tear down the routing table. */
954 bridge_rtable_fini(sc);
955
956 BRIDGE_UNLOCK(sc);
957
958 NET_EPOCH_ENTER(et);
959
960 callout_drain(&sc->sc_brcallout);
961
962 BRIDGE_LIST_LOCK();
963 LIST_REMOVE(sc, sc_list);
964 BRIDGE_LIST_UNLOCK();
965
966 bstp_detach(&sc->sc_stp);
967 #ifdef ALTQ
968 IFQ_PURGE(&ifp->if_snd);
969 #endif
970 NET_EPOCH_EXIT(et);
971
972 ether_ifdetach(ifp);
973 if_free(ifp);
974
975 NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
976
977 return (0);
978 }
979
980 /*
981 * bridge_ioctl:
982 *
983 * Handle a control request from the operator.
984 */
985 static int
986 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
987 {
988 struct bridge_softc *sc = ifp->if_softc;
989 struct ifreq *ifr = (struct ifreq *)data;
990 struct bridge_iflist *bif;
991 struct thread *td = curthread;
992 union {
993 struct ifbreq ifbreq;
994 struct ifbifconf ifbifconf;
995 struct ifbareq ifbareq;
996 struct ifbaconf ifbaconf;
997 struct ifbrparam ifbrparam;
998 struct ifbropreq ifbropreq;
999 struct ifbif_vlan_req ifvlanreq;
1000 } args;
1001 struct ifdrv *ifd = (struct ifdrv *) data;
1002 const struct bridge_control *bc;
1003 int error = 0, oldmtu;
1004
1005 BRIDGE_LOCK(sc);
1006
1007 switch (cmd) {
1008 case SIOCADDMULTI:
1009 case SIOCDELMULTI:
1010 break;
1011
1012 case SIOCGDRVSPEC:
1013 case SIOCSDRVSPEC:
1014 if (ifd->ifd_cmd >= bridge_control_table_size) {
1015 error = EXTERROR(EINVAL, "Invalid control command");
1016 break;
1017 }
1018 bc = &bridge_control_table[ifd->ifd_cmd];
1019
1020 if (cmd == SIOCGDRVSPEC &&
1021 (bc->bc_flags & BC_F_COPYOUT) == 0) {
1022 error = EXTERROR(EINVAL,
1023 "Inappropriate ioctl for command "
1024 "(expected SIOCSDRVSPEC)");
1025 break;
1026 }
1027 else if (cmd == SIOCSDRVSPEC &&
1028 (bc->bc_flags & BC_F_COPYOUT) != 0) {
1029 error = EXTERROR(EINVAL,
1030 "Inappropriate ioctl for command "
1031 "(expected SIOCGDRVSPEC)");
1032 break;
1033 }
1034
1035 if (bc->bc_flags & BC_F_SUSER) {
1036 error = priv_check(td, PRIV_NET_BRIDGE);
1037 if (error) {
1038 EXTERROR(error, "PRIV_NET_BRIDGE required");
1039 break;
1040 }
1041 }
1042
1043 if (ifd->ifd_len != bc->bc_argsize ||
1044 ifd->ifd_len > sizeof(args)) {
1045 error = EXTERROR(EINVAL, "Invalid argument size");
1046 break;
1047 }
1048
1049 bzero(&args, sizeof(args));
1050 if (bc->bc_flags & BC_F_COPYIN) {
1051 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
1052 if (error)
1053 break;
1054 }
1055
1056 oldmtu = ifp->if_mtu;
1057 error = (*bc->bc_func)(sc, &args);
1058 if (error)
1059 break;
1060
1061 /*
1062 * Bridge MTU may change during addition of the first port.
1063 * If it did, notify the network layers of the new MTU.
1064 */
1065 if (ifp->if_mtu != oldmtu)
1066 if_notifymtu(ifp);
1067
1068 if (bc->bc_flags & BC_F_COPYOUT)
1069 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
1070
1071 break;
1072
1073 case SIOCSIFFLAGS:
1074 if (!(ifp->if_flags & IFF_UP) &&
1075 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1076 /*
1077 * If interface is marked down and it is running,
1078 * then stop and disable it.
1079 */
1080 bridge_stop(ifp, 1);
1081 } else if ((ifp->if_flags & IFF_UP) &&
1082 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1083 /*
1084 * If interface is marked up and it is stopped, then
1085 * start it.
1086 */
1087 BRIDGE_UNLOCK(sc);
1088 (*ifp->if_init)(sc);
1089 BRIDGE_LOCK(sc);
1090 }
1091 break;
1092
1093 case SIOCSIFMTU:
1094 oldmtu = sc->sc_ifp->if_mtu;
1095
1096 if (ifr->ifr_mtu < IF_MINMTU) {
1097 error = EXTERROR(EINVAL,
1098 "Requested MTU is lower than IF_MINMTU");
1099 break;
1100 }
1101 if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1102 sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1103 break;
1104 }
1105 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1106 error = (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1107 SIOCSIFMTU, (caddr_t)ifr);
1108 if (error != 0) {
1109 log(LOG_NOTICE, "%s: invalid MTU: %u for"
1110 " member %s\n", sc->sc_ifp->if_xname,
1111 ifr->ifr_mtu,
1112 bif->bif_ifp->if_xname);
1113 error = EINVAL;
1114 break;
1115 }
1116 }
1117 if (error) {
1118 /* Restore the previous MTU on all member interfaces. */
1119 ifr->ifr_mtu = oldmtu;
1120 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1121 (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1122 SIOCSIFMTU, (caddr_t)ifr);
1123 }
1124 EXTERROR(error,
1125 "Failed to set MTU on member interface");
1126 } else {
1127 sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1128 }
1129 break;
1130 default:
1131 /*
1132 * drop the lock as ether_ioctl() will call bridge_start() and
1133 * cause the lock to be recursed.
1134 */
1135 BRIDGE_UNLOCK(sc);
1136 error = ether_ioctl(ifp, cmd, data);
1137 BRIDGE_LOCK(sc);
1138 break;
1139 }
1140
1141 BRIDGE_UNLOCK(sc);
1142
1143 return (error);
1144 }
1145
1146 /*
1147 * bridge_mutecaps:
1148 *
1149 * Clear or restore unwanted capabilities on the member interface
1150 */
1151 static void
1152 bridge_mutecaps(struct bridge_softc *sc)
1153 {
1154 struct bridge_iflist *bif;
1155 int enabled, mask;
1156
1157 BRIDGE_LOCK_ASSERT(sc);
1158
1159 /* Initial bitmask of capabilities to test */
1160 mask = BRIDGE_IFCAPS_MASK;
1161
1162 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1163 /* Every member must support it or it's disabled */
1164 mask &= bif->bif_savedcaps;
1165 }
1166
1167 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1168 enabled = bif->bif_ifp->if_capenable;
1169 enabled &= ~BRIDGE_IFCAPS_STRIP;
1170 /* Strip off mask bits and enable them again if allowed */
1171 enabled &= ~BRIDGE_IFCAPS_MASK;
1172 enabled |= mask;
1173 bridge_set_ifcap(sc, bif, enabled);
1174 }
1175 }
1176
1177 static void
1178 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1179 {
1180 struct ifnet *ifp = bif->bif_ifp;
1181 struct ifreq ifr;
1182 int error, mask, stuck;
1183
1184 bzero(&ifr, sizeof(ifr));
1185 ifr.ifr_reqcap = set;
1186
1187 if (ifp->if_capenable != set) {
1188 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1189 if (error)
1190 if_printf(sc->sc_ifp,
1191 "error setting capabilities on %s: %d\n",
1192 ifp->if_xname, error);
1193 mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
1194 stuck = ifp->if_capenable & mask & ~set;
1195 if (stuck != 0)
1196 if_printf(sc->sc_ifp,
1197 "can't disable some capabilities on %s: 0x%x\n",
1198 ifp->if_xname, stuck);
1199 }
1200 }
1201
1202 /*
1203 * bridge_lookup_member:
1204 *
1205 * Lookup a bridge member interface.
1206 */
1207 static struct bridge_iflist *
1208 bridge_lookup_member(struct bridge_softc *sc, const char *name)
1209 {
1210 struct bridge_iflist *bif;
1211 struct ifnet *ifp;
1212
1213 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1214
1215 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1216 ifp = bif->bif_ifp;
1217 if (strcmp(ifp->if_xname, name) == 0)
1218 return (bif);
1219 }
1220
1221 return (NULL);
1222 }
1223
1224 /*
1225 * bridge_lookup_member_if:
1226 *
1227 * Lookup a bridge member interface by ifnet*.
1228 */
1229 static struct bridge_iflist *
1230 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1231 {
1232 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1233 return (member_ifp->if_bridge);
1234 }
1235
1236 static void
1237 bridge_delete_member_cb(struct epoch_context *ctx)
1238 {
1239 struct bridge_iflist *bif;
1240
1241 bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1242
1243 free(bif, M_DEVBUF);
1244 }
1245
1246 /*
1247 * bridge_delete_member:
1248 *
1249 * Delete the specified member interface.
1250 */
1251 static void
1252 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1253 int gone)
1254 {
1255 struct ifnet *ifs = bif->bif_ifp;
1256 struct ifnet *fif = NULL;
1257 struct bridge_iflist *bifl;
1258
1259 BRIDGE_LOCK_ASSERT(sc);
1260
1261 if (bif->bif_flags & IFBIF_STP)
1262 bstp_disable(&bif->bif_stp);
1263
1264 ifs->if_bridge = NULL;
1265 CK_LIST_REMOVE(bif, bif_next);
1266
1267 /*
1268 * If removing the interface that gave the bridge its mac address, set
1269 * the mac address of the bridge to the address of the next member, or
1270 * to its default address if no members are left.
1271 */
1272 if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1273 if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1274 bcopy(&sc->sc_defaddr,
1275 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1276 sc->sc_ifaddr = NULL;
1277 } else {
1278 bifl = CK_LIST_FIRST(&sc->sc_iflist);
1279 fif = bifl->bif_ifp;
1280 bcopy(IF_LLADDR(fif),
1281 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1282 sc->sc_ifaddr = fif;
1283 }
1284 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1285 }
1286
1287 bridge_linkcheck(sc);
1288 bridge_mutecaps(sc); /* recalculate now that this interface is removed */
1289 BRIDGE_RT_LOCK(sc);
1290 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1291 BRIDGE_RT_UNLOCK(sc);
1292 KASSERT(bif->bif_addrcnt == 0,
1293 ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1294
1295 ifs->if_bridge_output = NULL;
1296 ifs->if_bridge_input = NULL;
1297 ifs->if_bridge_linkstate = NULL;
1298 if (!gone) {
1299 switch (ifs->if_type) {
1300 case IFT_ETHER:
1301 case IFT_L2VLAN:
1302 /*
1303 * Take the interface out of promiscuous mode, but only
1304 * if it was promiscuous in the first place. It might
1305 * not be if we're in the bridge_ioctl_add() error path.
1306 */
1307 if (ifs->if_flags & IFF_PROMISC)
1308 (void) ifpromisc(ifs, 0);
1309 break;
1310
1311 case IFT_GIF:
1312 break;
1313
1314 default:
1315 #ifdef DIAGNOSTIC
1316 panic("bridge_delete_member: impossible");
1317 #endif
1318 break;
1319 }
1320 /* Re-enable any interface capabilities */
1321 bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1322 }
1323 bstp_destroy(&bif->bif_stp); /* prepare to free */
1324
1325 NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1326 }
1327
1328 /*
1329 * bridge_delete_span:
1330 *
1331 * Delete the specified span interface.
1332 */
1333 static void
1334 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1335 {
1336 BRIDGE_LOCK_ASSERT(sc);
1337
1338 KASSERT(bif->bif_ifp->if_bridge == NULL,
1339 ("%s: not a span interface", __func__));
1340
1341 CK_LIST_REMOVE(bif, bif_next);
1342
1343 NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1344 }
1345
1346 static int
1347 bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1348 {
1349 struct ifbreq *req = arg;
1350 struct bridge_iflist *bif = NULL;
1351 struct ifnet *ifs;
1352 int error = 0;
1353
1354 ifs = ifunit(req->ifbr_ifsname);
1355 if (ifs == NULL)
1356 return (EXTERROR(ENOENT, "No such interface",
1357 req->ifbr_ifsname));
1358 if (ifs->if_ioctl == NULL) /* must be supported */
1359 return (EXTERROR(EINVAL, "Interface must support ioctl(2)"));
1360
1361 /*
1362 * If the new interface is a vlan(4), it could be a bridge SVI.
1363 * Don't allow such things to be added to bridges.
1364 */
1365 if (ifs->if_type == IFT_L2VLAN) {
1366 struct ifnet *parent;
1367 struct epoch_tracker et;
1368 bool is_bridge;
1369
1370 /*
1371 * Entering NET_EPOCH with BRIDGE_LOCK held, but this is okay
1372 * since we don't sleep here.
1373 */
1374 NET_EPOCH_ENTER(et);
1375 parent = VLAN_TRUNKDEV(ifs);
1376 is_bridge = (parent != NULL && parent->if_type == IFT_BRIDGE);
1377 NET_EPOCH_EXIT(et);
1378
1379 if (is_bridge)
1380 return (EXTERROR(EINVAL,
1381 "Bridge SVI cannot be added to a bridge"));
1382 }
1383
1384 /* If it's in the span list, it can't be a member. */
1385 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1386 if (ifs == bif->bif_ifp)
1387 return (EXTERROR(EBUSY,
1388 "Span interface cannot be a member"));
1389
1390 if (ifs->if_bridge) {
1391 struct bridge_iflist *sbif = ifs->if_bridge;
1392 if (sbif->bif_sc == sc)
1393 return (EXTERROR(EEXIST,
1394 "Interface is already a member of this bridge"));
1395
1396 return (EXTERROR(EBUSY,
1397 "Interface is already a member of another bridge"));
1398 }
1399
1400 switch (ifs->if_type) {
1401 case IFT_ETHER:
1402 case IFT_L2VLAN:
1403 case IFT_GIF:
1404 /* permitted interface types */
1405 break;
1406 default:
1407 return (EXTERROR(EINVAL, "Unsupported interface type"));
1408 }
1409
1410 #ifdef INET6
1411 /*
1412 * Two valid inet6 addresses with link-local scope must not be
1413 * on the parent interface and the member interfaces at the
1414 * same time. This restriction is needed to prevent violation
1415 * of the link-local scope zone. Attempting to add a member
1416 * interface which has inet6 addresses when the parent has
1417 * inet6 triggers removal of all inet6 addresses on the member
1418 * interface.
1419 */
1420
1421 /* Check if the parent interface has a link-local scope addr. */
1422 if (V_allow_llz_overlap == 0 &&
1423 in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1424 /*
1425 * If any, remove all inet6 addresses from the member
1426 * interfaces.
1427 */
1428 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1429 if (in6ifa_llaonifp(bif->bif_ifp)) {
1430 in6_ifdetach(bif->bif_ifp);
1431 if_printf(sc->sc_ifp,
1432 "IPv6 addresses on %s have been removed "
1433 "before adding it as a member to prevent "
1434 "IPv6 address scope violation.\n",
1435 bif->bif_ifp->if_xname);
1436 }
1437 }
1438 if (in6ifa_llaonifp(ifs)) {
1439 in6_ifdetach(ifs);
1440 if_printf(sc->sc_ifp,
1441 "IPv6 addresses on %s have been removed "
1442 "before adding it as a member to prevent "
1443 "IPv6 address scope violation.\n",
1444 ifs->if_xname);
1445 }
1446 }
1447 #endif
1448
1449 /*
1450 * If member_ifaddrs is disabled, do not allow an interface with
1451 * assigned IP addresses to be added to a bridge. Skip this check
1452 * for gif interfaces, because the IP address assigned to a gif
1453 * interface is separate from the bridge's Ethernet segment.
1454 */
1455 if (ifs->if_type != IFT_GIF) {
1456 struct ifaddr *ifa;
1457
1458 CK_STAILQ_FOREACH(ifa, &ifs->if_addrhead, ifa_link) {
1459 if (ifa->ifa_addr->sa_family != AF_INET &&
1460 ifa->ifa_addr->sa_family != AF_INET6)
1461 continue;
1462
1463 if (V_member_ifaddrs) {
1464 if_printf(sc->sc_ifp,
1465 "WARNING: Adding member interface %s which "
1466 "has an IP address assigned is deprecated "
1467 "and will be unsupported in a future "
1468 "release.\n", ifs->if_xname);
1469 break;
1470 } else {
1471 return (EXTERROR(EINVAL,
1472 "Member interface may not have "
1473 "an IP address assigned"));
1474 }
1475 }
1476 }
1477
1478 /* Allow the first Ethernet member to define the MTU */
1479 if (CK_LIST_EMPTY(&sc->sc_iflist))
1480 sc->sc_ifp->if_mtu = ifs->if_mtu;
1481 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1482 struct ifreq ifr;
1483
1484 snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
1485 ifs->if_xname);
1486 ifr.ifr_mtu = sc->sc_ifp->if_mtu;
1487
1488 error = (*ifs->if_ioctl)(ifs,
1489 SIOCSIFMTU, (caddr_t)&ifr);
1490 if (error != 0) {
1491 log(LOG_NOTICE, "%s: invalid MTU: %u for"
1492 " new member %s\n", sc->sc_ifp->if_xname,
1493 ifr.ifr_mtu,
1494 ifs->if_xname);
1495 return (EXTERROR(EINVAL,
1496 "Failed to set MTU on new member"));
1497 }
1498 }
1499
1500 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1501 if (bif == NULL)
1502 return (ENOMEM);
1503
1504 bif->bif_sc = sc;
1505 bif->bif_ifp = ifs;
1506 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1507 bif->bif_savedcaps = ifs->if_capenable;
1508 bif->bif_vlanproto = ETHERTYPE_VLAN;
1509 bif->bif_pvid = sc->sc_defpvid;
1510 if (sc->sc_flags & IFBRF_DEFQINQ)
1511 bif->bif_flags |= IFBIF_QINQ;
1512
1513 /*
1514 * Assign the interface's MAC address to the bridge if it's the first
1515 * member and the MAC address of the bridge has not been changed from
1516 * the default randomly generated one.
1517 */
1518 if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1519 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1520 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1521 sc->sc_ifaddr = ifs;
1522 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1523 }
1524
1525 ifs->if_bridge = bif;
1526 ifs->if_bridge_output = bridge_output;
1527 ifs->if_bridge_input = bridge_input;
1528 ifs->if_bridge_linkstate = bridge_linkstate;
1529 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1530 /*
1531 * XXX: XLOCK HERE!?!
1532 *
1533 * NOTE: insert_***HEAD*** should be safe for the traversals.
1534 */
1535 CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1536
1537 /* Set interface capabilities to the intersection set of all members */
1538 bridge_mutecaps(sc);
1539 bridge_linkcheck(sc);
1540
1541 /* Place the interface into promiscuous mode */
1542 switch (ifs->if_type) {
1543 case IFT_ETHER:
1544 case IFT_L2VLAN:
1545 error = ifpromisc(ifs, 1);
1546 break;
1547 }
1548
1549 if (error)
1550 bridge_delete_member(sc, bif, 0);
1551 return (error);
1552 }
1553
1554 static int
1555 bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1556 {
1557 struct ifbreq *req = arg;
1558 struct bridge_iflist *bif;
1559
1560 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1561 if (bif == NULL)
1562 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1563
1564 bridge_delete_member(sc, bif, 0);
1565
1566 return (0);
1567 }
1568
1569 static int
1570 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1571 {
1572 struct ifbreq *req = arg;
1573 struct bridge_iflist *bif;
1574 struct bstp_port *bp;
1575
1576 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1577 if (bif == NULL)
1578 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1579
1580 bp = &bif->bif_stp;
1581 req->ifbr_ifsflags = bif->bif_flags;
1582 req->ifbr_state = bp->bp_state;
1583 req->ifbr_priority = bp->bp_priority;
1584 req->ifbr_path_cost = bp->bp_path_cost;
1585 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1586 req->ifbr_proto = bp->bp_protover;
1587 req->ifbr_role = bp->bp_role;
1588 req->ifbr_stpflags = bp->bp_flags;
1589 req->ifbr_addrcnt = bif->bif_addrcnt;
1590 req->ifbr_addrmax = bif->bif_addrmax;
1591 req->ifbr_addrexceeded = bif->bif_addrexceeded;
1592 req->ifbr_pvid = bif->bif_pvid;
1593 req->ifbr_vlanproto = bif->bif_vlanproto;
1594
1595 /* Copy STP state options as flags */
1596 if (bp->bp_operedge)
1597 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1598 if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1599 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1600 if (bp->bp_ptp_link)
1601 req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1602 if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1603 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1604 if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1605 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1606 if (bp->bp_flags & BSTP_PORT_ADMCOST)
1607 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1608 return (0);
1609 }
1610
1611 static int
1612 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1613 {
1614 struct epoch_tracker et;
1615 struct ifbreq *req = arg;
1616 struct bridge_iflist *bif;
1617 struct bstp_port *bp;
1618 int error;
1619
1620 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1621 if (bif == NULL)
1622 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1623 bp = &bif->bif_stp;
1624
1625 if (req->ifbr_ifsflags & IFBIF_SPAN)
1626 /* SPAN is readonly */
1627 return (EXTERROR(EINVAL, "Span interface cannot be modified"));
1628
1629 NET_EPOCH_ENTER(et);
1630
1631 if (req->ifbr_ifsflags & IFBIF_STP) {
1632 if ((bif->bif_flags & IFBIF_STP) == 0) {
1633 error = bstp_enable(&bif->bif_stp);
1634 if (error) {
1635 NET_EPOCH_EXIT(et);
1636 return (EXTERROR(error,
1637 "Failed to enable STP"));
1638 }
1639 }
1640 } else {
1641 if ((bif->bif_flags & IFBIF_STP) != 0)
1642 bstp_disable(&bif->bif_stp);
1643 }
1644
1645 /* Pass on STP flags */
1646 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1647 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1648 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1649 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1650
1651 /* Save the bits relating to the bridge */
1652 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1653
1654 NET_EPOCH_EXIT(et);
1655
1656 return (0);
1657 }
1658
1659 static int
1660 bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1661 {
1662 struct ifbrparam *param = arg;
1663
1664 sc->sc_brtmax = param->ifbrp_csize;
1665 bridge_rttrim(sc);
1666
1667 return (0);
1668 }
1669
1670 static int
1671 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1672 {
1673 struct ifbrparam *param = arg;
1674
1675 param->ifbrp_csize = sc->sc_brtmax;
1676
1677 return (0);
1678 }
1679
1680 static int
1681 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1682 {
1683 struct ifbifconf *bifc = arg;
1684 struct bridge_iflist *bif;
1685 struct ifbreq breq;
1686 char *buf, *outbuf;
1687 int count, buflen, len, error = 0;
1688
1689 count = 0;
1690 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1691 count++;
1692 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1693 count++;
1694
1695 buflen = sizeof(breq) * count;
1696 if (bifc->ifbic_len == 0) {
1697 bifc->ifbic_len = buflen;
1698 return (0);
1699 }
1700 outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1701 if (outbuf == NULL)
1702 return (ENOMEM);
1703
1704 count = 0;
1705 buf = outbuf;
1706 len = min(bifc->ifbic_len, buflen);
1707 bzero(&breq, sizeof(breq));
1708 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1709 if (len < sizeof(breq))
1710 break;
1711
1712 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1713 sizeof(breq.ifbr_ifsname));
1714 /* Fill in the ifbreq structure */
1715 error = bridge_ioctl_gifflags(sc, &breq);
1716 if (error)
1717 break;
1718 memcpy(buf, &breq, sizeof(breq));
1719 count++;
1720 buf += sizeof(breq);
1721 len -= sizeof(breq);
1722 }
1723 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1724 if (len < sizeof(breq))
1725 break;
1726
1727 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1728 sizeof(breq.ifbr_ifsname));
1729 breq.ifbr_ifsflags = bif->bif_flags;
1730 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1731 memcpy(buf, &breq, sizeof(breq));
1732 count++;
1733 buf += sizeof(breq);
1734 len -= sizeof(breq);
1735 }
1736
1737 bifc->ifbic_len = sizeof(breq) * count;
1738 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1739 free(outbuf, M_TEMP);
1740 return (error);
1741 }
1742
1743 static int
1744 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1745 {
1746 struct ifbaconf *bac = arg;
1747 struct bridge_rtnode *brt;
1748 struct ifbareq bareq;
1749 char *buf, *outbuf;
1750 int count, buflen, len, error = 0;
1751
1752 if (bac->ifbac_len == 0)
1753 return (0);
1754
1755 count = 0;
1756 CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1757 count++;
1758 buflen = sizeof(bareq) * count;
1759
1760 outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1761 if (outbuf == NULL)
1762 return (ENOMEM);
1763
1764 count = 0;
1765 buf = outbuf;
1766 len = min(bac->ifbac_len, buflen);
1767 bzero(&bareq, sizeof(bareq));
1768 CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1769 if (len < sizeof(bareq))
1770 goto out;
1771 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1772 sizeof(bareq.ifba_ifsname));
1773 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1774 bareq.ifba_vlan = brt->brt_vlan;
1775 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1776 time_uptime < brt->brt_expire)
1777 bareq.ifba_expire = brt->brt_expire - time_uptime;
1778 else
1779 bareq.ifba_expire = 0;
1780 bareq.ifba_flags = brt->brt_flags;
1781
1782 memcpy(buf, &bareq, sizeof(bareq));
1783 count++;
1784 buf += sizeof(bareq);
1785 len -= sizeof(bareq);
1786 }
1787 out:
1788 bac->ifbac_len = sizeof(bareq) * count;
1789 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1790 free(outbuf, M_TEMP);
1791 return (error);
1792 }
1793
1794 static int
1795 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1796 {
1797 struct ifbareq *req = arg;
1798 struct bridge_iflist *bif;
1799 struct epoch_tracker et;
1800 int error;
1801
1802 NET_EPOCH_ENTER(et);
1803 bif = bridge_lookup_member(sc, req->ifba_ifsname);
1804 if (bif == NULL) {
1805 NET_EPOCH_EXIT(et);
1806 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1807 }
1808
1809 /* bridge_rtupdate() may acquire the lock. */
1810 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1811 req->ifba_flags);
1812 NET_EPOCH_EXIT(et);
1813
1814 return (error);
1815 }
1816
1817 static int
1818 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1819 {
1820 struct ifbrparam *param = arg;
1821
1822 sc->sc_brttimeout = param->ifbrp_ctime;
1823 return (0);
1824 }
1825
1826 static int
1827 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1828 {
1829 struct ifbrparam *param = arg;
1830
1831 param->ifbrp_ctime = sc->sc_brttimeout;
1832 return (0);
1833 }
1834
1835 static int
1836 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1837 {
1838 struct ifbareq *req = arg;
1839 int vlan = req->ifba_vlan;
1840
1841 /* Userspace uses '0' to mean 'any vlan' */
1842 if (vlan == 0)
1843 vlan = DOT1Q_VID_RSVD_IMPL;
1844
1845 return (bridge_rtdaddr(sc, req->ifba_dst, vlan));
1846 }
1847
1848 static int
1849 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1850 {
1851 struct ifbreq *req = arg;
1852
1853 BRIDGE_RT_LOCK(sc);
1854 bridge_rtflush(sc, req->ifbr_ifsflags);
1855 BRIDGE_RT_UNLOCK(sc);
1856
1857 return (0);
1858 }
1859
1860 static int
1861 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1862 {
1863 struct ifbrparam *param = arg;
1864 struct bstp_state *bs = &sc->sc_stp;
1865
1866 param->ifbrp_prio = bs->bs_bridge_priority;
1867 return (0);
1868 }
1869
1870 static int
1871 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1872 {
1873 struct ifbrparam *param = arg;
1874
1875 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1876 }
1877
1878 static int
1879 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1880 {
1881 struct ifbrparam *param = arg;
1882 struct bstp_state *bs = &sc->sc_stp;
1883
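/*
 * bstp(4) stores its timer values shifted left by 8 (units of 1/256
 * second); shift right to report whole seconds to userland.
 */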
1884 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1885 return (0);
1886 }
1887
1888 static int
1889 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1890 {
1891 struct ifbrparam *param = arg;
1892
1893 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1894 }
1895
1896 static int
1897 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1898 {
1899 struct ifbrparam *param = arg;
1900 struct bstp_state *bs = &sc->sc_stp;
1901
1902 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1903 return (0);
1904 }
1905
1906 static int
1907 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1908 {
1909 struct ifbrparam *param = arg;
1910
1911 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1912 }
1913
1914 static int
1915 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1916 {
1917 struct ifbrparam *param = arg;
1918 struct bstp_state *bs = &sc->sc_stp;
1919
1920 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1921 return (0);
1922 }
1923
1924 static int
1925 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1926 {
1927 struct ifbrparam *param = arg;
1928
1929 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1930 }
1931
1932 static int
1933 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1934 {
1935 struct ifbreq *req = arg;
1936 struct bridge_iflist *bif;
1937
1938 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1939 if (bif == NULL)
1940 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1941
1942 return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1943 }
1944
1945 static int
1946 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1947 {
1948 struct ifbreq *req = arg;
1949 struct bridge_iflist *bif;
1950
1951 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1952 if (bif == NULL)
1953 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1954
1955 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1956 }
1957
1958 static int
1959 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1960 {
1961 struct ifbreq *req = arg;
1962 struct bridge_iflist *bif;
1963
1964 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1965 if (bif == NULL)
1966 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1967
1968 bif->bif_addrmax = req->ifbr_addrmax;
1969 return (0);
1970 }
1971
1972 static int
1973 bridge_ioctl_sifpvid(struct bridge_softc *sc, void *arg)
1974 {
1975 struct ifbreq *req = arg;
1976 struct bridge_iflist *bif;
1977
1978 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1979 if (bif == NULL)
1980 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1981
1982 if (req->ifbr_pvid > DOT1Q_VID_MAX)
1983 return (EXTERROR(EINVAL, "Invalid VLAN ID"));
1984
1985 bif->bif_pvid = req->ifbr_pvid;
1986 return (0);
1987 }
1988
1989 static int
1990 bridge_ioctl_sifvlanset(struct bridge_softc *sc, void *arg)
1991 {
1992 struct ifbif_vlan_req *req = arg;
1993 struct bridge_iflist *bif;
1994
1995 if ((sc->sc_flags & IFBRF_VLANFILTER) == 0)
1996 return (EXTERROR(EINVAL, "VLAN filtering not enabled"));
1997
1998 bif = bridge_lookup_member(sc, req->bv_ifname);
1999 if (bif == NULL)
2000 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
2001
2002 /* Reject invalid VIDs. */
2003 if (BRVLAN_TEST(&req->bv_set, DOT1Q_VID_NULL) ||
2004 BRVLAN_TEST(&req->bv_set, DOT1Q_VID_RSVD_IMPL))
2005 return (EXTERROR(EINVAL, "Invalid VLAN ID in set"));
2006
2007 switch (req->bv_op) {
2008 /* Replace the existing vlan set with the new set */
2009 case BRDG_VLAN_OP_SET:
2010 BIT_COPY(BRVLAN_SETSIZE, &req->bv_set, &bif->bif_vlan_set);
2011 break;
2012
2013 /* Modify the existing vlan set to add the given vlans */
2014 case BRDG_VLAN_OP_ADD:
2015 BIT_OR(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2016 break;
2017
2018 /* Modify the existing vlan set to remove the given vlans */
2019 case BRDG_VLAN_OP_DEL:
2020 BIT_ANDNOT(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2021 break;
2022
2023 /* Invalid or unknown operation */
2024 default:
2025 return (EXTERROR(EINVAL,
2026 "Unsupported BRDGSIFVLANSET operation"));
2027 }
2028
2029 return (0);
2030 }
2031
2032 static int
2033 bridge_ioctl_gifvlanset(struct bridge_softc *sc, void *arg)
2034 {
2035 struct ifbif_vlan_req *req = arg;
2036 struct bridge_iflist *bif;
2037
2038 bif = bridge_lookup_member(sc, req->bv_ifname);
2039 if (bif == NULL)
2040 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
2041
2042 BIT_COPY(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2043 return (0);
2044 }
2045
2046 static int
2047 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
2048 {
2049 struct ifbreq *req = arg;
2050 struct bridge_iflist *bif = NULL;
2051 struct ifnet *ifs;
2052
2053 ifs = ifunit(req->ifbr_ifsname);
2054 if (ifs == NULL)
2055 return (EXTERROR(ENOENT, "No such interface"));
2056
2057 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2058 if (ifs == bif->bif_ifp)
2059 return (EXTERROR(EBUSY,
2060 "Interface is already a span port"));
2061
2062 if (ifs->if_bridge != NULL)
2063 return (EXTERROR(EEXIST,
2064 "Interface is already a bridge member"));
2065
2066 switch (ifs->if_type) {
2067 case IFT_ETHER:
2068 case IFT_GIF:
2069 case IFT_L2VLAN:
2070 break;
2071 default:
2072 return (EXTERROR(EINVAL, "Unsupported interface type"));
2073 }
2074
2075 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
2076 if (bif == NULL)
2077 return (ENOMEM);
2078
2079 bif->bif_ifp = ifs;
2080 bif->bif_flags = IFBIF_SPAN;
2081
2082 CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
2083
2084 return (0);
2085 }
2086
2087 static int
2088 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
2089 {
2090 struct ifbreq *req = arg;
2091 struct bridge_iflist *bif;
2092 struct ifnet *ifs;
2093
2094 ifs = ifunit(req->ifbr_ifsname);
2095 if (ifs == NULL)
2096 return (EXTERROR(ENOENT, "No such interface"));
2097
2098 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2099 if (ifs == bif->bif_ifp)
2100 break;
2101
2102 if (bif == NULL)
2103 return (EXTERROR(ENOENT, "Interface is not a span port"));
2104
2105 bridge_delete_span(sc, bif);
2106
2107 return (0);
2108 }
2109
2110 static int
2111 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
2112 {
2113 struct ifbropreq *req = arg;
2114 struct bstp_state *bs = &sc->sc_stp;
2115 struct bstp_port *root_port;
2116
2117 req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
2118 req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
2119 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
2120
2121 root_port = bs->bs_root_port;
2122 if (root_port == NULL)
2123 req->ifbop_root_port = 0;
2124 else
2125 req->ifbop_root_port = root_port->bp_ifp->if_index;
2126
2127 req->ifbop_holdcount = bs->bs_txholdcount;
2128 req->ifbop_priority = bs->bs_bridge_priority;
2129 req->ifbop_protocol = bs->bs_protover;
2130 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
2131 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
2132 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
2133 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
2134 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
2135 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
2136
2137 return (0);
2138 }
2139
2140 static int
2141 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
2142 {
2143 struct ifbrparam *param = arg;
2144
2145 param->ifbrp_cexceeded = sc->sc_brtexceeded;
2146 return (0);
2147 }
2148
2149 static int
2150 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
2151 {
2152 struct ifbpstpconf *bifstp = arg;
2153 struct bridge_iflist *bif;
2154 struct bstp_port *bp;
2155 struct ifbpstpreq bpreq;
2156 char *buf, *outbuf;
2157 int count, buflen, len, error = 0;
2158
2159 count = 0;
2160 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2161 if ((bif->bif_flags & IFBIF_STP) != 0)
2162 count++;
2163 }
2164
2165 buflen = sizeof(bpreq) * count;
2166 if (bifstp->ifbpstp_len == 0) {
2167 bifstp->ifbpstp_len = buflen;
2168 return (0);
2169 }
2170
2171 outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
2172 if (outbuf == NULL)
2173 return (ENOMEM);
2174
2175 count = 0;
2176 buf = outbuf;
2177 len = min(bifstp->ifbpstp_len, buflen);
2178 bzero(&bpreq, sizeof(bpreq));
2179 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2180 if (len < sizeof(bpreq))
2181 break;
2182
2183 if ((bif->bif_flags & IFBIF_STP) == 0)
2184 continue;
2185
2186 bp = &bif->bif_stp;
2187 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
2188 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
2189 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
2190 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
2191 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
2192 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
2193
2194 memcpy(buf, &bpreq, sizeof(bpreq));
2195 count++;
2196 buf += sizeof(bpreq);
2197 len -= sizeof(bpreq);
2198 }
2199
2200 bifstp->ifbpstp_len = sizeof(bpreq) * count;
2201 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
2202 free(outbuf, M_TEMP);
2203 return (error);
2204 }
2205
2206 static int
2207 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
2208 {
2209 struct ifbrparam *param = arg;
2210
2211 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
2212 }
2213
2214 static int
2215 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
2216 {
2217 struct ifbrparam *param = arg;
2218
2219 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
2220 }
2221
2222 static int
2223 bridge_ioctl_gflags(struct bridge_softc *sc, void *arg)
2224 {
2225 struct ifbrparam *param = arg;
2226
2227 param->ifbrp_flags = sc->sc_flags;
2228
2229 return (0);
2230 }
2231
2232 static int
2233 bridge_ioctl_sflags(struct bridge_softc *sc, void *arg)
2234 {
2235 struct ifbrparam *param = arg;
2236
2237 sc->sc_flags = param->ifbrp_flags;
2238
2239 return (0);
2240 }
2241
2242 static int
2243 bridge_ioctl_gdefpvid(struct bridge_softc *sc, void *arg)
2244 {
2245 struct ifbrparam *param = arg;
2246
2247 param->ifbrp_defpvid = sc->sc_defpvid;
2248
2249 return (0);
2250 }
2251
2252 static int
2253 bridge_ioctl_sdefpvid(struct bridge_softc *sc, void *arg)
2254 {
2255 struct ifbrparam *param = arg;
2256
2257 /* Reject invalid VIDs, but allow 0 to mean 'none'. */
2258 if (param->ifbrp_defpvid > DOT1Q_VID_MAX)
2259 return (EINVAL);
2260
2261 sc->sc_defpvid = param->ifbrp_defpvid;
2262
2263 return (0);
2264 }
2265
2266 static int
2267 bridge_ioctl_svlanproto(struct bridge_softc *sc, void *arg)
2268 {
2269 struct ifbreq *req = arg;
2270 struct bridge_iflist *bif;
2271
2272 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2273 if (bif == NULL)
2274 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
2275
2276 if (req->ifbr_vlanproto != ETHERTYPE_VLAN &&
2277 req->ifbr_vlanproto != ETHERTYPE_QINQ)
2278 return (EXTERROR(EINVAL, "Invalid VLAN protocol"));
2279
2280 bif->bif_vlanproto = req->ifbr_vlanproto;
2281
2282 return (0);
2283 }
2284 /*
2285 * bridge_ifdetach:
2286 *
2287 * Detach an interface from a bridge. Called when a member
2288 * interface is detaching.
2289 */
2290 static void
2291 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
2292 {
2293 struct bridge_iflist *bif = ifp->if_bridge;
2294 struct bridge_softc *sc = NULL;
2295
2296 if (bif)
2297 sc = bif->bif_sc;
2298
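/* Ignore departure events generated by an interface rename. */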
2299 if (ifp->if_flags & IFF_RENAMING)
2300 return;
2301 if (V_bridge_cloner == NULL) {
2302 /*
2303 * This detach handler can be called after
2304 * vnet_bridge_uninit(). Just return in that case.
2305 */
2306 return;
2307 }
2308 /* Check if the interface is a bridge member */
2309 if (sc != NULL) {
2310 BRIDGE_LOCK(sc);
2311 bridge_delete_member(sc, bif, 1);
2312 BRIDGE_UNLOCK(sc);
2313 return;
2314 }
2315
2316 /* Check if the interface is a span port */
2317 BRIDGE_LIST_LOCK();
2318 LIST_FOREACH(sc, &V_bridge_list, sc_list) {
2319 BRIDGE_LOCK(sc);
2320 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2321 if (ifp == bif->bif_ifp) {
2322 bridge_delete_span(sc, bif);
2323 break;
2324 }
2325
2326 BRIDGE_UNLOCK(sc);
2327 }
2328 BRIDGE_LIST_UNLOCK();
2329 }
2330
2331 /*
2332 * bridge_init:
2333 *
2334 * Initialize a bridge interface.
2335 */
2336 static void
2337 bridge_init(void *xsc)
2338 {
2339 struct bridge_softc *sc = (struct bridge_softc *)xsc;
2340 struct ifnet *ifp = sc->sc_ifp;
2341
2342 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2343 return;
2344
2345 BRIDGE_LOCK(sc);
2346 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
2347 bridge_timer, sc);
2348
2349 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2350 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
2351
2352 BRIDGE_UNLOCK(sc);
2353 }
2354
2355 /*
2356 * bridge_stop:
2357 *
2358 * Stop the bridge interface.
2359 */
2360 static void
2361 bridge_stop(struct ifnet *ifp, int disable)
2362 {
2363 struct bridge_softc *sc = ifp->if_softc;
2364
2365 BRIDGE_LOCK_ASSERT(sc);
2366
2367 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2368 return;
2369
2370 BRIDGE_RT_LOCK(sc);
2371 callout_stop(&sc->sc_brcallout);
2372
2373 bstp_stop(&sc->sc_stp);
2374
2375 bridge_rtflush(sc, IFBF_FLUSHDYN);
2376 BRIDGE_RT_UNLOCK(sc);
2377
2378 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2379 }
2380
2381 /*
2382 * bridge_enqueue:
2383 *
2384 * Enqueue a packet on a bridge member interface.
2385 *
2386 */
2387 static int
2388 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
2389 struct bridge_iflist *bif)
2390 {
2391 int len, err = 0;
2392 short mflags;
2393 struct mbuf *m0;
2394
2395 /*
2396 * Find the bridge member port this packet is being sent on, if the
2397 * caller didn't already provide it.
2398 */
2399 if (bif == NULL)
2400 bif = bridge_lookup_member_if(sc, dst_ifp);
2401 if (bif == NULL) {
2402 /* Perhaps the interface was removed from the bridge */
2403 m_freem(m);
2404 return (EINVAL);
2405 }
2406
2407 /* We may be sending a fragment so traverse the mbuf */
2408 for (; m; m = m0) {
2409 m0 = m->m_nextpkt;
2410 m->m_nextpkt = NULL;
2411 len = m->m_pkthdr.len;
2412 mflags = m->m_flags;
2413
2414 /*
2415 * If the native VLAN ID of the outgoing interface matches the
2416 * VLAN ID of the frame, remove the VLAN tag.
2417 */
2418 if (bif->bif_pvid != DOT1Q_VID_NULL &&
2419 VLANTAGOF(m) == bif->bif_pvid) {
2420 m->m_flags &= ~M_VLANTAG;
2421 m->m_pkthdr.ether_vtag = 0;
2422 }
2423
2424 /*
2425 * There are two cases where we have to insert our own tag:
2426 * if the member interface doesn't support hardware tagging,
2427 * or if the tag proto is not 802.1q.
2428 */
2429 if ((m->m_flags & M_VLANTAG) &&
2430 ((dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0 ||
2431 bif->bif_vlanproto != ETHERTYPE_VLAN)) {
2432 m = ether_vlanencap_proto(m, m->m_pkthdr.ether_vtag,
2433 bif->bif_vlanproto);
2434 if (m == NULL) {
2435 if_printf(dst_ifp,
2436 "unable to prepend VLAN header\n");
2437 if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
2438 continue;
2439 }
2440 m->m_flags &= ~M_VLANTAG;
2441 }
2442
2443 M_ASSERTPKTHDR(m); /* We shouldn't transmit an mbuf without a pkthdr */
2444 /*
2445 * XXXZL: gif(4) requires the af to be saved in the csum_data field
2446 * so that the gif_transmit() routine can pull it back.
2447 */
2448 if (dst_ifp->if_type == IFT_GIF)
2449 m->m_pkthdr.csum_data = AF_LINK;
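/*
 * If the transmit fails, free this fragment and any remaining on
 * the chain, counting them all as output errors.
 */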
2450 if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
2451 int n;
2452
2453 for (m = m0, n = 1; m != NULL; m = m0, n++) {
2454 m0 = m->m_nextpkt;
2455 m_freem(m);
2456 }
2457 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, n);
2458 break;
2459 }
2460
2461 if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
2462 if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
2463 if (mflags & M_MCAST)
2464 if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
2465 }
2466
2467 return (err);
2468 }
2469
2470 /*
2471 * bridge_dummynet:
2472 *
2473 * Receive a queued packet from dummynet and pass it on to the output
2474 * interface.
2475 *
2476 * The mbuf has the Ethernet header already attached.
2477 */
2478 static void
2479 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2480 {
2481 struct bridge_iflist *bif = ifp->if_bridge;
2482 struct bridge_softc *sc = NULL;
2483
2484 if (bif)
2485 sc = bif->bif_sc;
2486
2487 /*
2488 * The packet didn't originate from a member interface. This should only
2489 * ever happen if a member interface is removed while packets are
2490 * queued for it.
2491 */
2492 if (sc == NULL) {
2493 m_freem(m);
2494 return;
2495 }
2496
2497 if (PFIL_HOOKED_OUT_46) {
2498 if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2499 return;
2500 if (m == NULL)
2501 return;
2502 }
2503
2504 bridge_enqueue(sc, ifp, m, NULL);
2505 }
2506
2507 /*
2508 * bridge_output:
2509 *
2510 * Send output from a bridge member interface. This
2511 * performs the bridging function for locally originated
2512 * packets.
2513 *
2514 * The mbuf has the Ethernet header already attached. We must
2515 * enqueue or free the mbuf before returning.
2516 */
2517 static int
2518 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2519 struct rtentry *rt)
2520 {
2521 struct ether_header *eh;
2522 struct bridge_iflist *sbif;
2523 struct ifnet *bifp, *dst_if;
2524 struct bridge_softc *sc;
2525 ether_vlanid_t vlan;
2526
2527 NET_EPOCH_ASSERT();
2528
2529 if (m->m_len < ETHER_HDR_LEN) {
2530 m = m_pullup(m, ETHER_HDR_LEN);
2531 if (m == NULL)
2532 return (0);
2533 }
2534
2535 sbif = ifp->if_bridge;
2536 sc = sbif->bif_sc;
2537 bifp = sc->sc_ifp;
2538
2539 eh = mtod(m, struct ether_header *);
2540 vlan = VLANTAGOF(m);
2541
2542 /*
2543 * If bridge is down, but the original output interface is up,
2544 * go ahead and send out that interface. Otherwise, the packet
2545 * is dropped below.
2546 */
2547 if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2548 dst_if = ifp;
2549 goto sendunicast;
2550 }
2551
2552 /*
2553 * If the packet is a multicast, or we don't know a better way to
2554 * get there, send to all interfaces.
2555 */
2556 if (ETHER_IS_MULTICAST(eh->ether_dhost))
2557 dst_if = NULL;
2558 else
2559 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2560 /* Tap any traffic not passing back out the originating interface */
2561 if (dst_if != ifp)
2562 ETHER_BPF_MTAP(bifp, m);
2563 if (dst_if == NULL) {
2564 struct bridge_iflist *bif;
2565 struct mbuf *mc;
2566 int used = 0;
2567
2568 bridge_span(sc, m);
2569
2570 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2571 dst_if = bif->bif_ifp;
2572
2573 if (dst_if->if_type == IFT_GIF)
2574 continue;
2575 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2576 continue;
2577
2578 /*
2579 * If this is not the original output interface,
2580 * and the interface is participating in spanning
2581 * tree, make sure the port is in a state that
2582 * allows forwarding.
2583 */
2584 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2585 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2586 continue;
2587
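/*
 * The last member can consume the original mbuf; every other
 * member gets its own duplicate.
 */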
2588 if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2589 used = 1;
2590 mc = m;
2591 } else {
2592 mc = m_dup(m, M_NOWAIT);
2593 if (mc == NULL) {
2594 if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2595 continue;
2596 }
2597 }
2598
2599 bridge_enqueue(sc, dst_if, mc, bif);
2600 }
2601 if (used == 0)
2602 m_freem(m);
2603 return (0);
2604 }
2605
2606 sendunicast:
2607 /*
2608 * XXX Spanning tree consideration here?
2609 */
2610
2611 bridge_span(sc, m);
2612 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2613 m_freem(m);
2614 return (0);
2615 }
2616
2617 bridge_enqueue(sc, dst_if, m, NULL);
2618 return (0);
2619 }
2620
2621 /*
2622 * bridge_transmit:
2623 *
2624 * Do output on a bridge.
2625 *
2626 */
2627 static int
2628 bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2629 {
2630 struct bridge_softc *sc;
2631 struct ether_header *eh;
2632 struct ifnet *dst_if;
2633 int error = 0;
2634 ether_vlanid_t vlan;
2635
2636 sc = ifp->if_softc;
2637
2638 ETHER_BPF_MTAP(ifp, m);
2639
2640 eh = mtod(m, struct ether_header *);
2641 vlan = VLANTAGOF(m);
2642
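/*
 * Unicast frames with a known destination go straight to that member;
 * everything else is flooded through bridge_broadcast().
 */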
2643 if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2644 (dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan)) != NULL) {
2645 error = bridge_enqueue(sc, dst_if, m, NULL);
2646 } else
2647 bridge_broadcast(sc, ifp, m, 0);
2648
2649 return (error);
2650 }
2651
2652 #ifdef ALTQ
2653 static void
2654 bridge_altq_start(if_t ifp)
2655 {
2656 struct ifaltq *ifq = &ifp->if_snd;
2657 struct mbuf *m;
2658
2659 IFQ_LOCK(ifq);
2660 IFQ_DEQUEUE_NOLOCK(ifq, m);
2661 while (m != NULL) {
2662 bridge_transmit(ifp, m);
2663 IFQ_DEQUEUE_NOLOCK(ifq, m);
2664 }
2665 IFQ_UNLOCK(ifq);
2666 }
2667
2668 static int
2669 bridge_altq_transmit(if_t ifp, struct mbuf *m)
2670 {
2671 int err;
2672
2673 if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
2674 IFQ_ENQUEUE(&ifp->if_snd, m, err);
2675 if (err == 0)
2676 bridge_altq_start(ifp);
2677 } else
2678 err = bridge_transmit(ifp, m);
2679
2680 return (err);
2681 }
2682 #endif /* ALTQ */
2683
2684 /*
2685 * The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2686 */
2687 static void
2688 bridge_qflush(struct ifnet *ifp __unused)
2689 {
2690 }
2691
2692 /*
2693 * bridge_forward:
2694 *
2695 * The forwarding function of the bridge.
2696 *
2697 * NOTE: Releases the lock on return.
2698 */
2699 static void
2700 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2701 struct mbuf *m)
2702 {
2703 struct bridge_iflist *dbif;
2704 struct ifnet *src_if, *dst_if, *ifp;
2705 struct ether_header *eh;
2706 uint8_t *dst;
2707 int error;
2708 ether_vlanid_t vlan;
2709
2710 NET_EPOCH_ASSERT();
2711
2712 src_if = m->m_pkthdr.rcvif;
2713 ifp = sc->sc_ifp;
2714 vlan = VLANTAGOF(m);
2715
2716 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2717 if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2718
2719 if ((sbif->bif_flags & IFBIF_STP) &&
2720 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2721 goto drop;
2722
2723 eh = mtod(m, struct ether_header *);
2724 dst = eh->ether_dhost;
2725
2726 /* If the interface is learning, record the address. */
2727 if (sbif->bif_flags & IFBIF_LEARNING) {
2728 error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2729 sbif, 0, IFBAF_DYNAMIC);
2730 /*
2731 * If the interface has address limits, then deny any source
2732 * that is not in the cache.
2733 */
2734 if (error && sbif->bif_addrmax)
2735 goto drop;
2736 }
2737
2738 if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2739 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2740 goto drop;
2741
2742 #ifdef DEV_NETMAP
2743 /*
2744 * Hand the packet to netmap only if it wasn't injected by netmap
2745 * itself.
2746 */
2747 if ((m->m_flags & M_BRIDGE_INJECT) == 0 &&
2748 (if_getcapenable(ifp) & IFCAP_NETMAP) != 0) {
2749 ifp->if_input(ifp, m);
2750 return;
2751 }
2752 m->m_flags &= ~M_BRIDGE_INJECT;
2753 #endif
2754
2755 /*
2756 * At this point, the port either doesn't participate
2757 * in spanning tree or it is in the forwarding state.
2758 */
2759
2760 /*
2761 * If the packet is unicast, destined for someone on
2762 * "this" side of the bridge, drop it.
2763 */
2764 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2765 dst_if = bridge_rtlookup(sc, dst, vlan);
2766 if (src_if == dst_if)
2767 goto drop;
2768 } else {
2769 /*
2770 * Check if it's a reserved multicast address; any address
2771 * listed in 802.1D section 7.12.6 may not be forwarded by the
2772 * bridge.
2773 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
2774 */
2775 if (dst[0] == 0x01 && dst[1] == 0x80 &&
2776 dst[2] == 0xc2 && dst[3] == 0x00 &&
2777 dst[4] == 0x00 && dst[5] <= 0x0f)
2778 goto drop;
2779
2780 /* ...forward it to all interfaces. */
2781 if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2782 dst_if = NULL;
2783 }
2784
2785 /*
2786 * If we have a destination interface which is a member of our bridge,
2787 * OR this is a unicast packet, push it through the bpf(4) machinery.
2788 * For broadcast or multicast packets, don't bother because it will
2789 * be reinjected into ether_input. We do this before we pass the packets
2790 * through the pfil(9) framework, as it is possible that pfil(9) will
2791 * drop the packet, or possibly modify it, making it difficult to debug
2792 * firewall issues on the bridge.
2793 */
2794 if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2795 ETHER_BPF_MTAP(ifp, m);
2796
2797 /* run the packet filter */
2798 if (PFIL_HOOKED_IN_46) {
2799 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2800 return;
2801 if (m == NULL)
2802 return;
2803 }
2804
2805 if (dst_if == NULL) {
2806 bridge_broadcast(sc, src_if, m, 1);
2807 return;
2808 }
2809
2810 /*
2811 * At this point, we're dealing with a unicast frame
2812 * going to a different interface.
2813 */
2814 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2815 goto drop;
2816
2817 dbif = bridge_lookup_member_if(sc, dst_if);
2818 if (dbif == NULL)
2819 /* Not a member of the bridge (anymore?) */
2820 goto drop;
2821
2822 /* Private segments can not talk to each other */
2823 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2824 goto drop;
2825
2826 /* Do VLAN filtering. */
2827 if (!bridge_vfilter_out(dbif, m))
2828 goto drop;
2829
2830 if ((dbif->bif_flags & IFBIF_STP) &&
2831 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2832 goto drop;
2833
2834 if (PFIL_HOOKED_OUT_46) {
2835 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2836 return;
2837 if (m == NULL)
2838 return;
2839 }
2840
2841 bridge_enqueue(sc, dst_if, m, dbif);
2842 return;
2843
2844 drop:
2845 m_freem(m);
2846 }
2847
2848 /*
2849 * bridge_input:
2850 *
2851 * Receive input from a member interface. Queue the packet for
2852 * bridging if it is not for us.
2853 */
2854 static struct mbuf *
2855 bridge_input(struct ifnet *ifp, struct mbuf *m)
2856 {
2857 struct bridge_softc *sc = NULL;
2858 struct bridge_iflist *bif, *bif2;
2859 struct ifnet *bifp;
2860 struct ether_header *eh;
2861 struct mbuf *mc, *mc2;
2862 ether_vlanid_t vlan;
2863 int error;
2864
2865 NET_EPOCH_ASSERT();
2866
2867 /* We need the Ethernet header later, so make sure we have it now. */
2868 if (m->m_len < ETHER_HDR_LEN) {
2869 m = m_pullup(m, ETHER_HDR_LEN);
2870 if (m == NULL) {
2871 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2872 m_freem(m);
2873 return (NULL);
2874 }
2875 }
2876
2877 eh = mtod(m, struct ether_header *);
2878 vlan = VLANTAGOF(m);
2879
2880 /*
2881 * If this frame has a VLAN tag and the receiving interface has a
2882 * vlan(4) trunk, then it is destined for vlan(4), not for us.
2883 * This means if vlan(4) and bridge(4) are configured on the same
2884 * interface, vlan(4) is preferred, which is what users typically
2885 * expect.
2886 */
2887 if (vlan != DOT1Q_VID_NULL && ifp->if_vlantrunk != NULL)
2888 return (m);
2889
2890 bif = ifp->if_bridge;
2891 if (bif)
2892 sc = bif->bif_sc;
2893
2894 if (sc == NULL) {
2895 /*
2896 * This packet originated from the bridge itself, so it must
2897 * have been transmitted by netmap. Derive the "source"
2898 * interface from the source address and drop the packet if the
2899 * source address isn't known.
2900 */
2901 KASSERT((m->m_flags & M_BRIDGE_INJECT) != 0,
2902 ("%s: ifnet %p missing a bridge softc", __func__, ifp));
2903 sc = if_getsoftc(ifp);
2904 ifp = bridge_rtlookup(sc, eh->ether_shost, vlan);
2905 if (ifp == NULL) {
2906 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2907 m_freem(m);
2908 return (NULL);
2909 }
2910 m->m_pkthdr.rcvif = ifp;
2911 }
2912 bifp = sc->sc_ifp;
2913 if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2914 return (m);
2915
2916 /*
2917 * Implement support for bridge monitoring. If this flag has been
2918 * set on this interface, discard the packet once we push it through
2919 * the bpf(4) machinery, but before we do, increment the byte and
2920 * packet counters associated with this interface.
2921 */
2922 if ((bifp->if_flags & IFF_MONITOR) != 0) {
2923 m->m_pkthdr.rcvif = bifp;
2924 ETHER_BPF_MTAP(bifp, m);
2925 if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2926 if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2927 m_freem(m);
2928 return (NULL);
2929 }
2930
2931 /* Do VLAN filtering. */
2932 if (!bridge_vfilter_in(bif, m)) {
2933 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2934 m_freem(m);
2935 return (NULL);
2936 }
2937 /* bridge_vfilter_in() may add a tag */
2938 vlan = VLANTAGOF(m);
2939
2940 bridge_span(sc, m);
2941
2942 if (m->m_flags & (M_BCAST|M_MCAST)) {
2943 /* Tap off 802.1D packets; they do not get forwarded. */
2944 if (memcmp(eh->ether_dhost, bstp_etheraddr,
2945 ETHER_ADDR_LEN) == 0) {
2946 bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2947 return (NULL);
2948 }
2949
2950 if ((bif->bif_flags & IFBIF_STP) &&
2951 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2952 return (m);
2953 }
2954
2955 /*
2956 * Make a deep copy of the packet and enqueue the copy
2957 * for bridge processing; return the original packet for
2958 * local processing.
2959 */
2960 mc = m_dup(m, M_NOWAIT);
2961 if (mc == NULL) {
2962 return (m);
2963 }
2964
2965 /* Perform the bridge forwarding function with the copy. */
2966 bridge_forward(sc, bif, mc);
2967
2968 #ifdef DEV_NETMAP
2969 /*
2970 * If netmap is enabled and has not already seen this packet,
2971 * then it will be consumed by bridge_forward().
2972 */
2973 if ((if_getcapenable(bifp) & IFCAP_NETMAP) != 0 &&
2974 (m->m_flags & M_BRIDGE_INJECT) == 0) {
2975 m_freem(m);
2976 return (NULL);
2977 }
2978 #endif
2979
2980 /*
2981 * Reinject the mbuf as arriving on the bridge so we have a
2982 * chance at claiming multicast packets. We can not loop back
2983 * here from ether_input as a bridge is never a member of a
2984 * bridge.
2985 */
2986 KASSERT(bifp->if_bridge == NULL,
2987 ("loop created in bridge_input"));
2988 mc2 = m_dup(m, M_NOWAIT);
2989 if (mc2 != NULL) {
2990 /* Keep the layer3 header aligned */
2991 int i = min(mc2->m_pkthdr.len, max_protohdr);
2992 mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2993 }
2994 if (mc2 != NULL) {
2995 mc2->m_pkthdr.rcvif = bifp;
2996 mc2->m_flags &= ~M_BRIDGE_INJECT;
2997 sc->sc_if_input(bifp, mc2);
2998 }
2999
3000 /* Return the original packet for local processing. */
3001 return (m);
3002 }
3003
3004 if ((bif->bif_flags & IFBIF_STP) &&
3005 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
3006 return (m);
3007 }
3008
3009 #if defined(INET) || defined(INET6)
3010 #define CARP_CHECK_WE_ARE_DST(iface) \
3011 ((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_dhost))
3012 #define CARP_CHECK_WE_ARE_SRC(iface) \
3013 ((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_shost))
3014 #else
3015 #define CARP_CHECK_WE_ARE_DST(iface) false
3016 #define CARP_CHECK_WE_ARE_SRC(iface) false
3017 #endif
3018
3019 #ifdef DEV_NETMAP
3020 #define GRAB_FOR_NETMAP(ifp, m) do { \
3021 if ((if_getcapenable(ifp) & IFCAP_NETMAP) != 0 && \
3022 ((m)->m_flags & M_BRIDGE_INJECT) == 0) { \
3023 (ifp)->if_input(ifp, m); \
3024 return (NULL); \
3025 } \
3026 } while (0)
3027 #else
3028 #define GRAB_FOR_NETMAP(ifp, m)
3029 #endif
3030
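/*
 * GRAB_OUR_PACKETS() expands inline in bridge_input(): if the frame is
 * addressed to (or was sent by) the given interface, it is delivered
 * locally or dropped here and the enclosing function returns.
 */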
3031 #define GRAB_OUR_PACKETS(iface) \
3032 if ((iface)->if_type == IFT_GIF) \
3033 continue; \
3034 /* It is destined for us. */ \
3035 if (memcmp(IF_LLADDR(iface), eh->ether_dhost, ETHER_ADDR_LEN) == 0 || \
3036 CARP_CHECK_WE_ARE_DST(iface)) { \
3037 if (bif->bif_flags & IFBIF_LEARNING) { \
3038 error = bridge_rtupdate(sc, eh->ether_shost, \
3039 vlan, bif, 0, IFBAF_DYNAMIC); \
3040 if (error && bif->bif_addrmax) { \
3041 m_freem(m); \
3042 return (NULL); \
3043 } \
3044 } \
3045 m->m_pkthdr.rcvif = iface; \
3046 if ((iface) == ifp) { \
3047 /* Skip bridge processing... src == dest */ \
3048 return (m); \
3049 } \
3050 /* It's passing over or to the bridge, locally. */ \
3051 ETHER_BPF_MTAP(bifp, m); \
3052 if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1); \
3053 if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);\
3054 /* Hand the packet over to netmap if necessary. */ \
3055 GRAB_FOR_NETMAP(bifp, m); \
3056 /* Filter on the physical interface. */ \
3057 if (V_pfil_local_phys && PFIL_HOOKED_IN_46) { \
3058 if (bridge_pfil(&m, NULL, ifp, \
3059 PFIL_IN) != 0 || m == NULL) { \
3060 return (NULL); \
3061 } \
3062 } \
3063 if ((iface) != bifp) \
3064 ETHER_BPF_MTAP(iface, m); \
3065 /* Pass tagged packets to if_vlan, if it's loaded */ \
3066 if (VLANTAGOF(m) != 0) { \
3067 if (bifp->if_vlantrunk == NULL) { \
3068 m_freem(m); \
3069 return (NULL); \
3070 } \
3071 (*vlan_input_p)(bifp, m); \
3072 return (NULL); \
3073 } \
3074 return (m); \
3075 } \
3076 \
3077 /* We just received a packet that we sent out. */ \
3078 if (memcmp(IF_LLADDR(iface), eh->ether_shost, ETHER_ADDR_LEN) == 0 || \
3079 CARP_CHECK_WE_ARE_SRC(iface)) { \
3080 m_freem(m); \
3081 return (NULL); \
3082 }
3083
3084 /*
3085 * Unicast. Make sure it's not for the bridge.
3086 */
3087 do { GRAB_OUR_PACKETS(bifp) } while (0);
3088
3089 /*
3090 * If member_ifaddrs is enabled, see if the packet is destined for
3091 * one of the members' addresses.
3092 */
3093 if (V_member_ifaddrs) {
3094 /* Check the interface the packet arrived on. */
3095 do { GRAB_OUR_PACKETS(ifp) } while (0);
3096
3097 CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
3098 GRAB_OUR_PACKETS(bif2->bif_ifp)
3099 }
3100 }
3101
3102 #undef CARP_CHECK_WE_ARE_DST
3103 #undef CARP_CHECK_WE_ARE_SRC
3104 #undef GRAB_FOR_NETMAP
3105 #undef GRAB_OUR_PACKETS
3106
3107 /* Perform the bridge forwarding function. */
3108 bridge_forward(sc, bif, m);
3109
3110 return (NULL);
3111 }
3112
3113 /*
3114 * Inject a packet back into the host ethernet stack. This will generally only
3115 * be used by netmap when an application writes to the host TX ring. The
3116 * M_BRIDGE_INJECT flag ensures that the packet is re-routed to the bridge
3117 * interface after ethernet processing.
3118 */
3119 static void
3120 bridge_inject(struct ifnet *ifp, struct mbuf *m)
3121 {
3122 struct bridge_softc *sc;
3123
3124 if (ifp->if_type == IFT_L2VLAN) {
3125 /*
3126 * vlan(4) gives us the vlan ifnet, so we need to get the
3127 * bridge softc to get a pointer to ether_input to send the
3128 * packet to.
3129 */
3130 struct ifnet *bifp = NULL;
3131
3132 if (vlan_trunkdev_p == NULL) {
3133 m_freem(m);
3134 return;
3135 }
3136
3137 bifp = vlan_trunkdev_p(ifp);
3138 if (bifp == NULL) {
3139 m_freem(m);
3140 return;
3141 }
3142
3143 sc = if_getsoftc(bifp);
3144 sc->sc_if_input(ifp, m);
3145 return;
3146 }
3147
3148 KASSERT((if_getcapenable(ifp) & IFCAP_NETMAP) != 0,
3149 ("%s: iface %s is not running in netmap mode",
3150 __func__, if_name(ifp)));
3151 KASSERT((m->m_flags & M_BRIDGE_INJECT) == 0,
3152 ("%s: mbuf %p has M_BRIDGE_INJECT set", __func__, m));
3153
3154 m->m_flags |= M_BRIDGE_INJECT;
3155 sc = if_getsoftc(ifp);
3156 sc->sc_if_input(ifp, m);
3157 }
3158
3159 /*
3160 * bridge_broadcast:
3161 *
3162 * Send a frame to all interfaces that are members of
3163 * the bridge, except for the one on which the packet
3164 * arrived.
3165 *
3166 * NOTE: Releases the lock on return.
3167 */
3168 static void
3169 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
3170 struct mbuf *m, int runfilt)
3171 {
3172 struct bridge_iflist *dbif, *sbif;
3173 struct mbuf *mc;
3174 struct ifnet *dst_if;
3175 int used = 0, i;
3176
3177 NET_EPOCH_ASSERT();
3178
3179 sbif = bridge_lookup_member_if(sc, src_if);
3180
3181 /* Filter on the bridge interface before broadcasting */
3182 if (runfilt && PFIL_HOOKED_OUT_46) {
3183 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
3184 return;
3185 if (m == NULL)
3186 return;
3187 }
3188
3189 CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
3190 dst_if = dbif->bif_ifp;
3191 if (dst_if == src_if)
3192 continue;
3193
3194 /* Private segments can not talk to each other */
3195 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
3196 continue;
3197
3198 /* Do VLAN filtering. */
3199 if (!bridge_vfilter_out(dbif, m))
3200 continue;
3201
3202 if ((dbif->bif_flags & IFBIF_STP) &&
3203 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3204 continue;
3205
3206 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
3207 (m->m_flags & (M_BCAST|M_MCAST)) == 0)
3208 continue;
3209
3210 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
3211 continue;
3212
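/*
 * As in bridge_output(), the last member consumes the original mbuf;
 * the others get duplicates.
 */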
3213 if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
3214 mc = m;
3215 used = 1;
3216 } else {
3217 mc = m_dup(m, M_NOWAIT);
3218 if (mc == NULL) {
3219 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3220 continue;
3221 }
3222 }
3223
3224 /*
3225 * Filter on the output interface. Pass a NULL bridge interface
3226 * pointer so we do not redundantly filter on the bridge for
3227 * each interface we broadcast on.
3228 */
3229 if (runfilt && PFIL_HOOKED_OUT_46) {
3230 if (used == 0) {
3231 /* Keep the layer3 header aligned */
3232 i = min(mc->m_pkthdr.len, max_protohdr);
3233 mc = m_copyup(mc, i, ETHER_ALIGN);
3234 if (mc == NULL) {
3235 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3236 continue;
3237 }
3238 }
3239 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
3240 continue;
3241 if (mc == NULL)
3242 continue;
3243 }
3244
3245 bridge_enqueue(sc, dst_if, mc, dbif);
3246 }
3247 if (used == 0)
3248 m_freem(m);
3249 }
3250
3251 /*
3252 * bridge_span:
3253 *
3254 * Duplicate a packet out one or more interfaces that are in span mode;
3255 * the original mbuf is unmodified.
3256 */
3257 static void
3258 bridge_span(struct bridge_softc *sc, struct mbuf *m)
3259 {
3260 struct bridge_iflist *bif;
3261 struct ifnet *dst_if;
3262 struct mbuf *mc;
3263
3264 NET_EPOCH_ASSERT();
3265
3266 if (CK_LIST_EMPTY(&sc->sc_spanlist))
3267 return;
3268
3269 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
3270 dst_if = bif->bif_ifp;
3271
3272 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
3273 continue;
3274
3275 mc = m_dup(m, M_NOWAIT);
3276 if (mc == NULL) {
3277 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3278 continue;
3279 }
3280
3281 bridge_enqueue(sc, dst_if, mc, bif);
3282 }
3283 }
3284
3285 /*
3286 * Incoming VLAN filtering. Given a frame and the member interface it was
3287 * received on, decide whether the port configuration allows it.
3288 */
3289 static bool
3290 bridge_vfilter_in(const struct bridge_iflist *sbif, struct mbuf *m)
3291 {
3292 ether_vlanid_t vlan;
3293
3294 vlan = VLANTAGOF(m);
3295 /* Make sure the vlan id is reasonable. */
3296 if (vlan > DOT1Q_VID_MAX)
3297 return (false);
3298
3299 /*
3300 * If VLAN filtering isn't enabled, pass everything, but add a tag
3301 * if the port has a pvid configured.
3302 */
3303 if ((sbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0) {
3304 if (vlan == DOT1Q_VID_NULL &&
3305 sbif->bif_pvid != DOT1Q_VID_NULL) {
3306 m->m_pkthdr.ether_vtag = sbif->bif_pvid;
3307 m->m_flags |= M_VLANTAG;
3308 }
3309
3310 return (true);
3311 }
3312
3313 /* If Q-in-Q is disabled, check for stacked tags. */
3314 if ((sbif->bif_flags & IFBIF_QINQ) == 0) {
3315 struct ether_header *eh;
3316 uint16_t proto;
3317
3318 eh = mtod(m, struct ether_header *);
3319 proto = ntohs(eh->ether_type);
3320
3321 if (proto == ETHERTYPE_VLAN || proto == ETHERTYPE_QINQ)
3322 return (false);
3323 }
3324
3325 if (vlan == DOT1Q_VID_NULL) {
3326 /*
3327 * The frame doesn't have a tag. If the interface does not
3328 * have an untagged vlan configured, drop the frame.
3329 */
3330 if (sbif->bif_pvid == DOT1Q_VID_NULL)
3331 return (false);
3332
3333 /*
3334 * Otherwise, insert a new tag based on the interface's
3335 * untagged vlan id.
3336 */
3337 m->m_pkthdr.ether_vtag = sbif->bif_pvid;
3338 m->m_flags |= M_VLANTAG;
3339 } else {
3340 /*
3341 * The frame has a tag, so check it matches the interface's
3342 * vlan access list. We explicitly do not accept tagged
3343 * frames for the untagged vlan id here (unless it's also
3344 * in the access list).
3345 */
3346 if (!BRVLAN_TEST(&sbif->bif_vlan_set, vlan))
3347 return (false);
3348 }
3349
3350 /* Accept the frame. */
3351 return (true);
3352 }
3353
3354 /*
3355 * Outgoing VLAN filtering. Given a frame, its vlan, and the member interface
3356 * we intend to send it to, decide whether the port configuration allows it to
3357 * be sent.
3358 */
3359 static bool
3360 bridge_vfilter_out(const struct bridge_iflist *dbif, const struct mbuf *m)
3361 {
3362 struct ether_header *eh;
3363 ether_vlanid_t vlan;
3364
3365 NET_EPOCH_ASSERT();
3366
3367 /* If VLAN filtering isn't enabled, pass everything. */
3368 if ((dbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0)
3369 return (true);
3370
3371 vlan = VLANTAGOF(m);
3372
3373 /*
3374 * Always allow untagged 802.1D STP frames, even if they would
3375 * otherwise be dropped. This is required for STP to work on
3376 * a filtering bridge.
3377 *
3378 * Tagged STP (Cisco PVST+) is a non-standard extension, so
3379 * handle those frames via the normal filtering path.
3380 */
3381 eh = mtod(m, struct ether_header *);
3382 if (vlan == DOT1Q_VID_NULL &&
3383 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0)
3384 return (true);
3385
3386 /*
3387 * If the frame wasn't assigned to a vlan at ingress, drop it.
3388 * We can't forward these frames to filtering ports because we
3389 * don't know what VLAN they're supposed to be in.
3390 */
3391 if (vlan == DOT1Q_VID_NULL)
3392 return (false);
3393
3394 /*
3395 * If the frame's vlan matches the interface's untagged vlan,
3396 * allow it.
3397 */
3398 if (vlan == dbif->bif_pvid)
3399 return (true);
3400
3401 /*
3402 * If the frame's vlan is on the interface's tagged access list,
3403 * allow it.
3404 */
3405 if (BRVLAN_TEST(&dbif->bif_vlan_set, vlan))
3406 return (true);
3407
3408 /* The frame was not permitted, so drop it. */
3409 return (false);
3410 }
3411
3412 /*
3413 * bridge_rtupdate:
3414 *
3415 * Add a bridge routing entry.
3416 */
3417 static int
3418 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
3419 ether_vlanid_t vlan, struct bridge_iflist *bif,
3420 int setflags, uint8_t flags)
3421 {
3422 struct bridge_rtnode *brt;
3423 struct bridge_iflist *obif;
3424 int error;
3425
3426 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
3427
3428 /* Check the source address is valid and not multicast. */
3429 if (ETHER_IS_MULTICAST(dst))
3430 return (EXTERROR(EINVAL, "Multicast address not permitted"));
3431 if (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
3432 dst[3] == 0 && dst[4] == 0 && dst[5] == 0)
3433 return (EXTERROR(EINVAL, "Zero address not permitted"));
3434
3435 /*
3436 * A route for this destination might already exist. If so,
3437 * update it, otherwise create a new one.
3438 */
3439 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
3440 BRIDGE_RT_LOCK(sc);
3441
3442 /* Check again, now that we have the lock. There could have
3443 * been a race and we only want to insert this once. */
3444 if (bridge_rtnode_lookup(sc, dst, vlan) != NULL) {
3445 BRIDGE_RT_UNLOCK(sc);
3446 return (0);
3447 }
3448
3449 if (sc->sc_brtcnt >= sc->sc_brtmax) {
3450 sc->sc_brtexceeded++;
3451 BRIDGE_RT_UNLOCK(sc);
3452 return (EXTERROR(ENOSPC, "Address table is full"));
3453 }
3454 /* Check per interface address limits (if enabled) */
3455 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
3456 bif->bif_addrexceeded++;
3457 BRIDGE_RT_UNLOCK(sc);
3458 return (EXTERROR(ENOSPC,
3459 "Interface address limit exceeded"));
3460 }
3461
3462 /*
3463 * Allocate a new bridge forwarding node, and
3464 * initialize the expiration time and Ethernet
3465 * address.
3466 */
3467 brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
3468 if (brt == NULL) {
3469 BRIDGE_RT_UNLOCK(sc);
3470 return (EXTERROR(ENOMEM,
3471 "Cannot allocate address node"));
3472 }
3473 brt->brt_vnet = curvnet;
3474
3475 if (bif->bif_flags & IFBIF_STICKY)
3476 brt->brt_flags = IFBAF_STICKY;
3477 else
3478 brt->brt_flags = IFBAF_DYNAMIC;
3479
3480 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
3481 brt->brt_vlan = vlan;
3482
3483 brt->brt_dst = bif;
3484 if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
3485 uma_zfree(V_bridge_rtnode_zone, brt);
3486 BRIDGE_RT_UNLOCK(sc);
3487 return (error);
3488 }
3489 bif->bif_addrcnt++;
3490
3491 BRIDGE_RT_UNLOCK(sc);
3492 }
3493
3494 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3495 (obif = brt->brt_dst) != bif) {
3496 MPASS(obif != NULL);
3497
3498 BRIDGE_RT_LOCK(sc);
3499 brt->brt_dst->bif_addrcnt--;
3500 brt->brt_dst = bif;
3501 brt->brt_dst->bif_addrcnt++;
3502 BRIDGE_RT_UNLOCK(sc);
3503
3504 if (V_log_mac_flap &&
3505 ppsratecheck(&V_log_last, &V_log_count, V_log_interval)) {
3506 log(LOG_NOTICE,
3507 "%s: mac address %6D vlan %d moved from %s to %s\n",
3508 sc->sc_ifp->if_xname,
3509 &brt->brt_addr[0], ":",
3510 brt->brt_vlan,
3511 obif->bif_ifp->if_xname,
3512 bif->bif_ifp->if_xname);
3513 }
3514 }
3515
3516 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3517 brt->brt_expire = time_uptime + sc->sc_brttimeout;
3518 if (setflags)
3519 brt->brt_flags = flags;
3520
3521 return (0);
3522 }
3523
3524 /*
3525 * bridge_rtlookup:
3526 *
3527 * Lookup the destination interface for an address.
3528 */
3529 static struct ifnet *
3530 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr,
3531 ether_vlanid_t vlan)
3532 {
3533 struct bridge_rtnode *brt;
3534
3535 NET_EPOCH_ASSERT();
3536
3537 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
3538 return (NULL);
3539
3540 return (brt->brt_ifp);
3541 }
3542
3543 /*
3544 * bridge_rttrim:
3545 *
3546 * Trim the routing table so that we have a number
3547 * of routing entries less than or equal to the
3548 * maximum number.
3549 */
3550 static void
3551 bridge_rttrim(struct bridge_softc *sc)
3552 {
3553 struct bridge_rtnode *brt, *nbrt;
3554
3555 NET_EPOCH_ASSERT();
3556 BRIDGE_RT_LOCK_ASSERT(sc);
3557
3558 /* Make sure we actually need to do this. */
3559 if (sc->sc_brtcnt <= sc->sc_brtmax)
3560 return;
3561
3562 /* Force an aging cycle; this might trim enough addresses. */
3563 bridge_rtage(sc);
3564 if (sc->sc_brtcnt <= sc->sc_brtmax)
3565 return;
3566
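/*
 * Still over the limit: evict dynamic entries until the table fits
 * within sc_brtmax again.
 */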
3567 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3568 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3569 bridge_rtnode_destroy(sc, brt);
3570 if (sc->sc_brtcnt <= sc->sc_brtmax)
3571 return;
3572 }
3573 }
3574 }
3575
3576 /*
3577 * bridge_timer:
3578 *
3579 * Aging timer for the bridge.
3580 */
3581 static void
3582 bridge_timer(void *arg)
3583 {
3584 struct bridge_softc *sc = arg;
3585
3586 BRIDGE_RT_LOCK_ASSERT(sc);
3587
3588 /* Destruction of rtnodes requires a proper vnet context */
3589 CURVNET_SET(sc->sc_ifp->if_vnet);
3590 bridge_rtage(sc);
3591
3592 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
3593 callout_reset(&sc->sc_brcallout,
3594 bridge_rtable_prune_period * hz, bridge_timer, sc);
3595 CURVNET_RESTORE();
3596 }
3597
3598 /*
3599 * bridge_rtage:
3600 *
3601 * Perform an aging cycle.
3602 */
3603 static void
3604 bridge_rtage(struct bridge_softc *sc)
3605 {
3606 struct bridge_rtnode *brt, *nbrt;
3607
3608 BRIDGE_RT_LOCK_ASSERT(sc);
3609
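/* Only dynamic entries expire; static and sticky entries are kept. */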
3610 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3611 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3612 if (time_uptime >= brt->brt_expire)
3613 bridge_rtnode_destroy(sc, brt);
3614 }
3615 }
3616 }
3617
3618 /*
3619 * bridge_rtflush:
3620 *
3621 * Remove all dynamic addresses from the bridge.
3622 */
3623 static void
3624 bridge_rtflush(struct bridge_softc *sc, int full)
3625 {
3626 struct bridge_rtnode *brt, *nbrt;
3627
3628 BRIDGE_RT_LOCK_ASSERT(sc);
3629
3630 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3631 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3632 bridge_rtnode_destroy(sc, brt);
3633 }
3634 }
3635
3636 /*
3637 * bridge_rtdaddr:
3638 *
3639 * Remove an address from the table.
3640 */
3641 static int
3642 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr,
3643 ether_vlanid_t vlan)
3644 {
3645 struct bridge_rtnode *brt;
3646 int found = 0;
3647
3648 BRIDGE_RT_LOCK(sc);
3649
3650 /*
3651 * If vlan is DOT1Q_VID_RSVD_IMPL then we want to delete for all vlans
3652 * so the lookup may return more than one.
3653 */
3654 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
3655 bridge_rtnode_destroy(sc, brt);
3656 found = 1;
3657 }
3658
3659 BRIDGE_RT_UNLOCK(sc);
3660
3661 return (found ? 0 : ENOENT);
3662 }
3663
3664 /*
3665 * bridge_rtdelete:
3666 *
3667 * Delete routes to a specific member interface.
3668 */
3669 static void
3670 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
3671 {
3672 struct bridge_rtnode *brt, *nbrt;
3673
3674 BRIDGE_RT_LOCK_ASSERT(sc);
3675
3676 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3677 if (brt->brt_ifp == ifp && (full ||
3678 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
3679 bridge_rtnode_destroy(sc, brt);
3680 }
3681 }
3682
3683 /*
3684 * bridge_rtable_init:
3685 *
3686 * Initialize the route table for this bridge.
3687 */
3688 static void
3689 bridge_rtable_init(struct bridge_softc *sc)
3690 {
3691 int i;
3692
3693 sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
3694 M_DEVBUF, M_WAITOK);
3695
3696 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
3697 CK_LIST_INIT(&sc->sc_rthash[i]);
3698
3699 sc->sc_rthash_key = arc4random();
3700 CK_LIST_INIT(&sc->sc_rtlist);
3701 }
3702
3703 /*
3704 * bridge_rtable_fini:
3705 *
3706 * Deconstruct the route table for this bridge.
3707 */
3708 static void
3709 bridge_rtable_fini(struct bridge_softc *sc)
3710 {
3711
3712 KASSERT(sc->sc_brtcnt == 0,
3713 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
3714 free(sc->sc_rthash, M_DEVBUF);
3715 }
3716
3717 /*
3718 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3719 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3720 */
3721 #define mix(a, b, c) \
3722 do { \
3723 a -= b; a -= c; a ^= (c >> 13); \
3724 b -= c; b -= a; b ^= (a << 8); \
3725 c -= a; c -= b; c ^= (b >> 13); \
3726 a -= b; a -= c; a ^= (c >> 12); \
3727 b -= c; b -= a; b ^= (a << 16); \
3728 c -= a; c -= b; c ^= (b >> 5); \
3729 a -= b; a -= c; a ^= (c >> 3); \
3730 b -= c; b -= a; b ^= (a << 10); \
3731 c -= a; c -= b; c ^= (b >> 15); \
3732 } while (/*CONSTCOND*/0)
3733
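/*
 * bridge_rthash:
 *
 *	Hash an Ethernet address into an index into the route table hash
 *	buckets, mixing in the per-bridge random key (sc_rthash_key).
 */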
static __inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	return (c & BRIDGE_RTHASH_MASK);
}

#undef mix

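/*
 * bridge_rtnode_addr_cmp:
 *
 *	Compare two Ethernet addresses byte by byte, returning a
 *	memcmp()-style result.  The hash chains are kept sorted on
 *	this ordering.
 */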
static int
bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int i, d;

	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
		d = ((int)a[i]) - ((int)b[i]);
	}

	return (d);
}

/*
 * bridge_rtnode_lookup:
 *
 *	Look up a bridge route node for the specified destination.  Compare
 *	the vlan id, or, if it is DOT1Q_VID_RSVD_IMPL, return the first
 *	address match regardless of vlan.
 */
static struct bridge_rtnode *
bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
    ether_vlanid_t vlan)
{
	struct bridge_rtnode *brt;
	uint32_t hash;
	int dir;

	BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);

	hash = bridge_rthash(sc, addr);
	CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
		if (dir == 0 && (brt->brt_vlan == vlan || vlan == DOT1Q_VID_RSVD_IMPL))
			return (brt);
		if (dir > 0)
			return (NULL);
	}

	return (NULL);
}

/*
 * bridge_rtnode_insert:
 *
 *	Insert the specified bridge node into the route table.  We
 *	assume the entry is not already in the table.
 */
static int
bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	struct bridge_rtnode *lbrt;
	uint32_t hash;
	int dir;

	BRIDGE_RT_LOCK_ASSERT(sc);

	hash = bridge_rthash(sc, brt->brt_addr);

	lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
	if (lbrt == NULL) {
		CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
		goto out;
	}

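	/*
	 * Walk the address-sorted hash chain: fail on a duplicate (same
	 * address and vlan), otherwise insert before the first entry that
	 * sorts after the new one, or at the tail of the chain.
	 */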
	do {
		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
			return (EXTERROR(EEXIST, "Address already exists"));
		if (dir > 0) {
			CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
			goto out;
		}
		if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
			CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
			goto out;
		}
		lbrt = CK_LIST_NEXT(lbrt, brt_hash);
	} while (lbrt != NULL);

#ifdef DIAGNOSTIC
	panic("bridge_rtnode_insert: impossible");
#endif

out:
	CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
	sc->sc_brtcnt++;

	return (0);
}

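/*
 * bridge_rtnode_destroy_cb:
 *
 *	Epoch callback which frees a route node once the network epoch has
 *	drained and no concurrent readers can still reference it.
 */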
static void
bridge_rtnode_destroy_cb(struct epoch_context *ctx)
{
	struct bridge_rtnode *brt;

	brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);

	CURVNET_SET(brt->brt_vnet);
	uma_zfree(V_bridge_rtnode_zone, brt);
	CURVNET_RESTORE();
}

/*
 * bridge_rtnode_destroy:
 *
 *	Destroy a bridge rtnode.
 */
static void
bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	BRIDGE_RT_LOCK_ASSERT(sc);

	CK_LIST_REMOVE(brt, brt_hash);

	CK_LIST_REMOVE(brt, brt_list);
	sc->sc_brtcnt--;
	brt->brt_dst->bif_addrcnt--;

	NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
}

/*
 * bridge_rtable_expire:
 *
 *	Set the expiry time for all routes on an interface.
 */
static void
bridge_rtable_expire(struct ifnet *ifp, int age)
{
	struct bridge_iflist *bif = NULL;
	struct bridge_softc *sc = NULL;
	struct bridge_rtnode *brt;

	CURVNET_SET(ifp->if_vnet);

	bif = ifp->if_bridge;
	if (bif)
		sc = bif->bif_sc;
	MPASS(sc != NULL);
	BRIDGE_RT_LOCK(sc);

	/*
	 * If the age is zero then flush, otherwise cap the expiry times for
	 * this interface at 'age' seconds from now.
	 */
	if (age == 0)
		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
	else {
		CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
			/* Cap the expiry time to 'age' */
			if (brt->brt_ifp == ifp &&
			    brt->brt_expire > time_uptime + age &&
			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
				brt->brt_expire = time_uptime + age;
		}
	}
	BRIDGE_RT_UNLOCK(sc);
	CURVNET_RESTORE();
}

/*
 * bridge_state_change:
 *
 *	Callback from the bridgestp code when a port changes states.
 */
static void
bridge_state_change(struct ifnet *ifp, int state)
{
	struct bridge_iflist *bif = ifp->if_bridge;
	struct bridge_softc *sc = bif->bif_sc;
	static const char *stpstates[] = {
		"disabled",
		"listening",
		"learning",
		"forwarding",
		"blocking",
		"discarding"
	};

	CURVNET_SET(ifp->if_vnet);
	if (V_log_stp)
		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
	CURVNET_RESTORE();
}

/*
 * Send bridge packets through pfil if they are one of the types pfil can deal
 * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
 * question.)  If bifp or ifp is NULL then packet filtering is skipped for
 * that interface.
 */
static int
bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
{
	int snap, error, i;
	struct ether_header *eh1, eh2;
	struct llc llc1;
	u_int16_t ether_type;
	pfil_return_t rv;
#ifdef INET
	struct ip *ip = NULL;
	int hlen = 0;
#endif

	snap = 0;
	error = -1;	/* Default error if not error == 0 */

#if 0
	/* we may return with the IP fields swapped, ensure it's not shared */
	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
#endif

	if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
		return (0); /* filtering is disabled */

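	/*
	 * Make sure the protocol header we are about to inspect is
	 * contiguous in the first mbuf.
	 */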
	i = min((*mp)->m_pkthdr.len, max_protohdr);
	if ((*mp)->m_len < i) {
		*mp = m_pullup(*mp, i);
		if (*mp == NULL) {
			printf("%s: m_pullup failed\n", __func__);
			return (-1);
		}
	}

	eh1 = mtod(*mp, struct ether_header *);
	ether_type = ntohs(eh1->ether_type);

	/*
	 * Check for SNAP/LLC.
	 */
	if (ether_type < ETHERMTU) {
		struct llc *llc2 = (struct llc *)(eh1 + 1);

		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
		    llc2->llc_dsap == LLC_SNAP_LSAP &&
		    llc2->llc_ssap == LLC_SNAP_LSAP &&
		    llc2->llc_control == LLC_UI) {
			ether_type = htons(llc2->llc_un.type_snap.ether_type);
			snap = 1;
		}
	}

	/*
	 * If we're trying to filter bridge traffic, only look at traffic for
	 * protocols available in the kernel (IPv4 and/or IPv6) to avoid
	 * passing traffic for an unsupported protocol to the filter.  This is
	 * lame since if we really wanted, say, an AppleTalk filter, we are
	 * hosed, but of course we don't have an AppleTalk filter to begin
	 * with.  (Note that since pfil doesn't understand ARP it will pass
	 * *ALL* ARP traffic.)
	 */
	switch (ether_type) {
#ifdef INET
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
		if (V_pfil_ipfw_arp == 0)
			return (0); /* Automatically pass */

		/* FALLTHROUGH */
	case ETHERTYPE_IP:
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
#endif /* INET6 */
		break;

	default:
		/*
		 * We get here if the packet isn't from a supported
		 * protocol.  Check to see if the user wants to pass
		 * non-IP packets, these will not be checked by pfil(9)
		 * and passed unconditionally so the default is to
		 * drop.
		 */
		if (V_pfil_onlyip)
			goto bad;
	}

	/* Run the packet through pfil before stripping link headers */
	if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
	    dir == PFIL_OUT && ifp != NULL) {
		switch (pfil_mbuf_out(V_link_pfil_head, mp, ifp, NULL)) {
		case PFIL_DROPPED:
			return (EACCES);
		case PFIL_CONSUMED:
			return (0);
		}
	}

	/* Strip off the Ethernet header and keep a copy. */
	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
	m_adj(*mp, ETHER_HDR_LEN);

	/* Strip off snap header, if present */
	if (snap) {
		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
		m_adj(*mp, sizeof(struct llc));
	}

	/*
	 * Check the IP header for alignment and errors
	 */
	if (dir == PFIL_IN) {
		switch (ether_type) {
#ifdef INET
		case ETHERTYPE_IP:
			error = bridge_ip_checkbasic(mp);
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			error = bridge_ip6_checkbasic(mp);
			break;
#endif /* INET6 */
		default:
			error = 0;
		}
		if (error)
			goto bad;
	}

	error = 0;

	/*
	 * Run the packet through pfil
	 */
	rv = PFIL_PASS;
	switch (ether_type) {
#ifdef INET
	case ETHERTYPE_IP:
		/*
		 * Run pfil on the member interface and the bridge, both can
		 * be skipped by clearing pfil_member or pfil_bridge.
		 *
		 * Keep the order:
		 *   in_if -> bridge_if -> out_if
		 */
		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
		    pfil_mbuf_out(V_inet_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;

		if (V_pfil_member && ifp != NULL) {
			rv = (dir == PFIL_OUT) ?
			    pfil_mbuf_out(V_inet_pfil_head, mp, ifp, NULL) :
			    pfil_mbuf_in(V_inet_pfil_head, mp, ifp, NULL);
			if (rv != PFIL_PASS)
				break;
		}

		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
		    pfil_mbuf_in(V_inet_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;

		/* check if we need to fragment the packet */
		/* bridge_fragment generates a mbuf chain of packets */
		/* that already include eth headers */
		if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
			i = (*mp)->m_pkthdr.len;
			if (i > ifp->if_mtu) {
				error = bridge_fragment(ifp, mp, &eh2, snap,
				    &llc1);
				return (error);
			}
		}

		/* Recalculate the ip checksum. */
		ip = mtod(*mp, struct ip *);
		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip))
			goto bad;
		if (hlen > (*mp)->m_len) {
			if ((*mp = m_pullup(*mp, hlen)) == NULL)
				goto bad;
			ip = mtod(*mp, struct ip *);
			if (ip == NULL)
				goto bad;
		}
		ip->ip_sum = 0;
		if (hlen == sizeof(struct ip))
			ip->ip_sum = in_cksum_hdr(ip);
		else
			ip->ip_sum = in_cksum(*mp, hlen);

		break;
#endif /* INET */
#ifdef INET6
	case ETHERTYPE_IPV6:
		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
		    pfil_mbuf_out(V_inet6_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;

		if (V_pfil_member && ifp != NULL) {
			rv = (dir == PFIL_OUT) ?
			    pfil_mbuf_out(V_inet6_pfil_head, mp, ifp, NULL) :
			    pfil_mbuf_in(V_inet6_pfil_head, mp, ifp, NULL);
			if (rv != PFIL_PASS)
				break;
		}

		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
		    pfil_mbuf_in(V_inet6_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;
		break;
#endif
	}

	switch (rv) {
	case PFIL_CONSUMED:
		return (0);
	case PFIL_DROPPED:
		return (EACCES);
	default:
		break;
	}

	error = -1;

	/*
	 * Finally, put everything back the way it was and return
	 */
	if (snap) {
		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
		if (*mp == NULL)
			return (error);
		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
	}

	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
	if (*mp == NULL)
		return (error);
	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);

	return (0);

bad:
	m_freem(*mp);
	*mp = NULL;
	return (error);
}

#ifdef INET
/*
 * Perform basic checks on the IP header size, since pfil assumes
 * ip_input() has already done them for it.  Cut-and-pasted from
 * ip_input.c.  Given how simple the IPv6 version is, does the IPv4
 * version really need to be this complicated?
 *
 * XXX Should we update ipstat here, or not?
 * XXX Right now we update ipstat but not csum_counter.
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (*mp == NULL)
		return (-1);

	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL) goto bad;

	if (ip->ip_v != IPVERSION) {
		KMOD_IPSTAT_INC(ips_badvers);
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) { /* minimum header length */
		KMOD_IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			KMOD_IPSTAT_INC(ips_badhlen);
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL) goto bad;
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		KMOD_IPSTAT_INC(ips_badsum);
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		KMOD_IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		KMOD_IPSTAT_INC(ips_tooshort);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}
#endif /* INET */

#ifdef INET6
/*
 * Same as above, but for IPv6.
 * Cut-and-pasted from ip6_input.c.
 * XXX Should we update ip6stat, or not?
 */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
	 */
	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			IP6STAT_INC(ip6s_toosmall);
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			IP6STAT_INC(ip6s_toosmall);
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		IP6STAT_INC(ip6s_badvers);
		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}
#endif /* INET6 */

#ifdef INET
/*
 * bridge_fragment:
 *
 *	Fragment an mbuf chain into multiple packets and prepend the Ethernet
 *	header to each.
 */
static int
bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
    int snap, struct llc *llc)
{
	struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
	struct ip *ip;
	int error = -1;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		goto dropit;
	ip = mtod(m, struct ip *);

	m->m_pkthdr.csum_flags |= CSUM_IP;
	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
	if (error)
		goto dropit;

	/*
	 * Walk the chain and re-add the Ethernet header for
	 * each mbuf packet.
	 */
	for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
		nextpkt = mcur->m_nextpkt;
		mcur->m_nextpkt = NULL;
		if (snap) {
			M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
			if (mcur == NULL) {
				error = ENOBUFS;
				if (mprev != NULL)
					mprev->m_nextpkt = nextpkt;
				goto dropit;
			}
			bcopy(llc, mtod(mcur, caddr_t), sizeof(struct llc));
		}

		M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
		if (mcur == NULL) {
			error = ENOBUFS;
			if (mprev != NULL)
				mprev->m_nextpkt = nextpkt;
			goto dropit;
		}
		bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);

		/*
		 * The previous two M_PREPEND could have inserted one or two
		 * mbufs in front so we have to update the previous packet's
		 * m_nextpkt.
		 */
		mcur->m_nextpkt = nextpkt;
		if (mprev != NULL)
			mprev->m_nextpkt = mcur;
		else {
			/* The first mbuf in the original chain needs to be
			 * updated. */
			*mp = mcur;
		}
		mprev = mcur;
	}

	KMOD_IPSTAT_INC(ips_fragmented);
	return (error);

dropit:
	for (mcur = *mp; mcur; mcur = m) {	/* dropping the full packet chain */
		m = mcur->m_nextpkt;
		m_freem(mcur);
	}
	return (error);
}
#endif /* INET */

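/*
 * bridge_linkstate:
 *
 *	Called when a member interface's link state changes; re-evaluate the
 *	bridge's own link state and notify the spanning tree code.
 */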
static void
bridge_linkstate(struct ifnet *ifp)
{
	struct bridge_softc *sc = NULL;
	struct bridge_iflist *bif;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);

	bif = ifp->if_bridge;
	if (bif)
		sc = bif->bif_sc;

	if (sc != NULL) {
		bridge_linkcheck(sc);
		bstp_linkstate(&bif->bif_stp);
	}

	NET_EPOCH_EXIT(et);
}

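/*
 * bridge_linkcheck:
 *
 *	Recompute the bridge interface's link state from the link states of
 *	its member ports.
 */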
static void
bridge_linkcheck(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	int new_link, hasls;

	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);

	new_link = LINK_STATE_DOWN;
	hasls = 0;
	/* Our link is considered up if at least one of our ports is active */
	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
			hasls++;
		if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
		/* If no interfaces support link-state then we default to up */
		new_link = LINK_STATE_UP;
	}
	if_link_state_change(sc->sc_ifp, new_link);
}
