1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */
2
3 /*-
4 * SPDX-License-Identifier: BSD-4-Clause
5 *
6 * Copyright 2001 Wasabi Systems, Inc.
7 * All rights reserved.
8 *
9 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed for the NetBSD Project by
22 * Wasabi Systems, Inc.
23 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
24 * or promote products derived from this software without specific prior
25 * written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40 /*
41 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
42 * All rights reserved.
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
55 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
56 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
61 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
62 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
63 * POSSIBILITY OF SUCH DAMAGE.
64 *
65 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
66 */
67
68 /*
69 * Network interface bridge support.
70 *
71 * TODO:
72 *
73 * - Currently only supports Ethernet-like interfaces (Ethernet,
74 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way
75 * to bridge other types of interfaces (maybe consider
76 * heterogeneous bridges).
77 */
78
79 #include "opt_inet.h"
80 #include "opt_inet6.h"
81
82 #define EXTERR_CATEGORY EXTERR_CAT_BRIDGE
83
84 #include <sys/param.h>
85 #include <sys/ctype.h> /* string functions */
86 #include <sys/eventhandler.h>
87 #include <sys/exterrvar.h>
88 #include <sys/jail.h>
89 #include <sys/kernel.h>
90 #include <sys/lock.h>
91 #include <sys/malloc.h>
92 #include <sys/mbuf.h>
93 #include <sys/module.h>
94 #include <sys/mutex.h>
95 #include <sys/priv.h>
96 #include <sys/proc.h>
97 #include <sys/protosw.h>
98 #include <sys/random.h>
99 #include <sys/systm.h>
100 #include <sys/socket.h> /* for net/if.h */
101 #include <sys/sockio.h>
102 #include <sys/syslog.h>
103 #include <sys/sysctl.h>
104 #include <sys/time.h>
105
106 #include <vm/uma.h>
107
108 #include <net/bpf.h>
109 #include <net/if.h>
110 #include <net/if_clone.h>
111 #include <net/if_dl.h>
112 #include <net/if_types.h>
113 #include <net/if_var.h>
114 #include <net/if_private.h>
115 #include <net/pfil.h>
116 #include <net/vnet.h>
117
118 #include <netinet/in.h>
119 #include <netinet/in_systm.h>
120 #include <netinet/in_var.h>
121 #include <netinet/ip.h>
122 #include <netinet/ip_var.h>
123 #ifdef INET6
124 #include <netinet/ip6.h>
125 #include <netinet6/ip6_var.h>
126 #include <netinet6/in6_ifattach.h>
127 #endif
128 #if defined(INET) || defined(INET6)
129 #include <netinet/ip_carp.h>
130 #endif
131 #include <machine/in_cksum.h>
132 #include <netinet/if_ether.h>
133 #include <net/bridgestp.h>
134 #include <net/if_bridgevar.h>
135 #include <net/if_llc.h>
136 #include <net/if_vlan_var.h>
137
138 #include <net/route.h>
139
140 /*
141 * At various points in the code we need to know if we're hooked into the INET
142 * and/or INET6 pfil. Define some macros to do that based on which IP versions
 * are enabled in the kernel. This avoids littering the rest of the code with
 * #ifdef INET6 guards around references to V_inet6_pfil_head.
145 */
146 #ifdef INET6
147 #define PFIL_HOOKED_IN_INET6 PFIL_HOOKED_IN(V_inet6_pfil_head)
148 #define PFIL_HOOKED_OUT_INET6 PFIL_HOOKED_OUT(V_inet6_pfil_head)
149 #else
150 #define PFIL_HOOKED_IN_INET6 false
151 #define PFIL_HOOKED_OUT_INET6 false
152 #endif
153
154 #ifdef INET
155 #define PFIL_HOOKED_IN_INET PFIL_HOOKED_IN(V_inet_pfil_head)
156 #define PFIL_HOOKED_OUT_INET PFIL_HOOKED_OUT(V_inet_pfil_head)
157 #else
158 #define PFIL_HOOKED_IN_INET false
159 #define PFIL_HOOKED_OUT_INET false
160 #endif
161
162 #define PFIL_HOOKED_IN_46 (PFIL_HOOKED_IN_INET6 || PFIL_HOOKED_IN_INET)
163 #define PFIL_HOOKED_OUT_46 (PFIL_HOOKED_OUT_INET6 || PFIL_HOOKED_OUT_INET)
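
/*
 * Illustrative sketch only (not a definitive recipe): the forwarding path
 * later in this file uses these macros roughly as
 *
 *	if (PFIL_HOOKED_OUT_46) {
 *		if (bridge_pfil(&m, sc->sc_ifp, dst_if, PFIL_OUT) != 0)
 *			return;
 *		if (m == NULL)
 *			return;
 *	}
 *
 * so bridge_pfil() is only invoked when at least one IPv4 or IPv6 pfil
 * hook is actually registered.
 */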
164
165 /*
166 * Size of the route hash table. Must be a power of two.
167 */
168 #ifndef BRIDGE_RTHASH_SIZE
169 #define BRIDGE_RTHASH_SIZE 1024
170 #endif
171
172 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
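
/*
 * Because the table size is a power of two, a hash value is folded into a
 * bucket index with a simple mask rather than a modulo, e.g.
 *
 *	bucket = hash & BRIDGE_RTHASH_MASK;	yields 0..BRIDGE_RTHASH_SIZE-1
 */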
173
174 /*
175 * Default maximum number of addresses to cache.
176 */
177 #ifndef BRIDGE_RTABLE_MAX
178 #define BRIDGE_RTABLE_MAX 2000
179 #endif
180
181 /*
182 * Timeout (in seconds) for entries learned dynamically.
183 */
184 #ifndef BRIDGE_RTABLE_TIMEOUT
185 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
186 #endif
187
188 /*
189 * Number of seconds between walks of the route list.
190 */
191 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD
192 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
193 #endif
194
195 /*
196 * List of capabilities to possibly mask on the member interface.
197 */
198 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
199 IFCAP_TXCSUM_IPV6|IFCAP_MEXTPG)
200
201 /*
202 * List of capabilities to strip
203 */
204 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO
205
206 /*
207 * Bridge locking
208 *
209 * The bridge relies heavily on the epoch(9) system to protect its data
210 * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
211 * must ensure there is only one writer at a time.
212 *
213 * That is: for read accesses we only need to be in NET_EPOCH, but for write
214 * accesses we must hold:
215 *
216 * - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
217 * - BRIDGE_LOCK, for any other change
218 *
219 * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
220 * calls to bridge member interfaces and these ioctl()s can sleep.
221 * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
222 * required while we're in NET_EPOCH and then we're not allowed to sleep.
223 */
224 #define BRIDGE_LOCK_INIT(_sc) do { \
225 sx_init(&(_sc)->sc_sx, "if_bridge"); \
226 mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF); \
227 } while (0)
228 #define BRIDGE_LOCK_DESTROY(_sc) do { \
229 sx_destroy(&(_sc)->sc_sx); \
230 mtx_destroy(&(_sc)->sc_rt_mtx); \
231 } while (0)
232 #define BRIDGE_LOCK(_sc) sx_xlock(&(_sc)->sc_sx)
233 #define BRIDGE_UNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx)
234 #define BRIDGE_LOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
235 #define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
236 MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
237 #define BRIDGE_UNLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
238 #define BRIDGE_RT_LOCK(_sc) mtx_lock(&(_sc)->sc_rt_mtx)
239 #define BRIDGE_RT_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_rt_mtx)
240 #define BRIDGE_RT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
241 #define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
242 MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
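
/*
 * Illustrative sketch of the resulting usage pattern (assumptions noted,
 * not a definitive recipe): a pure reader only needs the net epoch, while
 * a writer must take the bridge lock (or the RT lock for the forwarding
 * table):
 *
 *	NET_EPOCH_ENTER(et);
 *	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
 *		... read-only use of bif ...
 *	NET_EPOCH_EXIT(et);
 *
 *	BRIDGE_LOCK(sc);
 *	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
 *	BRIDGE_UNLOCK(sc);
 */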
243
244 struct bridge_softc;
245
246 /*
247 * Bridge interface list entry.
248 */
249 struct bridge_iflist {
250 CK_LIST_ENTRY(bridge_iflist) bif_next;
251 struct ifnet *bif_ifp; /* member if */
252 struct bridge_softc *bif_sc; /* parent bridge */
253 struct bstp_port bif_stp; /* STP state */
254 uint32_t bif_flags; /* member if flags */
255 int bif_savedcaps; /* saved capabilities */
256 uint32_t bif_addrmax; /* max # of addresses */
257 uint32_t bif_addrcnt; /* cur. # of addresses */
258 uint32_t bif_addrexceeded;/* # of address violations */
259 struct epoch_context bif_epoch_ctx;
260 ether_vlanid_t bif_pvid; /* port vlan id */
261 ifbvlan_set_t bif_vlan_set; /* if allowed tagged vlans */
262 uint16_t bif_vlanproto; /* vlan protocol */
263 };
264
265 /*
266 * Bridge route node.
267 */
268 struct bridge_rtnode {
269 CK_LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
270 CK_LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
271 struct bridge_iflist *brt_dst; /* destination if */
272 unsigned long brt_expire; /* expiration time */
273 uint8_t brt_flags; /* address flags */
274 uint8_t brt_addr[ETHER_ADDR_LEN];
275 ether_vlanid_t brt_vlan; /* vlan id */
276 struct vnet *brt_vnet;
277 struct epoch_context brt_epoch_ctx;
278 };
279 #define brt_ifp brt_dst->bif_ifp
280
281 /*
282 * Software state for each bridge.
283 */
284 struct bridge_softc {
285 struct ifnet *sc_ifp; /* make this an interface */
286 LIST_ENTRY(bridge_softc) sc_list;
287 struct sx sc_sx;
288 struct mtx sc_rt_mtx;
289 uint32_t sc_brtmax; /* max # of addresses */
290 uint32_t sc_brtcnt; /* cur. # of addresses */
291 uint32_t sc_brttimeout; /* rt timeout in seconds */
292 struct callout sc_brcallout; /* bridge callout */
293 CK_LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
294 CK_LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */
295 CK_LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */
296 uint32_t sc_rthash_key; /* key for hash */
297 CK_LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */
298 struct bstp_state sc_stp; /* STP state */
299 uint32_t sc_brtexceeded; /* # of cache drops */
300 struct ifnet *sc_ifaddr; /* member mac copied from */
301 struct ether_addr sc_defaddr; /* Default MAC address */
302 if_input_fn_t sc_if_input; /* Saved copy of if_input */
303 struct epoch_context sc_epoch_ctx;
304 ifbr_flags_t sc_flags; /* bridge flags */
305 ether_vlanid_t sc_defpvid; /* default PVID */
306 };
307
308 VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
309 #define V_bridge_list_sx VNET(bridge_list_sx)
310 static eventhandler_tag bridge_detach_cookie;
311
312 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
313
314 VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
315 #define V_bridge_rtnode_zone VNET(bridge_rtnode_zone)
316
317 static int bridge_clone_create(struct if_clone *, char *, size_t,
318 struct ifc_data *, struct ifnet **);
319 static int bridge_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
320
321 static int bridge_ioctl(struct ifnet *, u_long, caddr_t);
322 static void bridge_mutecaps(struct bridge_softc *);
323 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
324 int);
325 static void bridge_ifdetach(void *arg __unused, struct ifnet *);
326 static void bridge_init(void *);
327 static void bridge_dummynet(struct mbuf *, struct ifnet *);
328 static bool bridge_same(const void *, const void *);
329 static void *bridge_get_softc(struct ifnet *);
330 static void bridge_stop(struct ifnet *, int);
331 static int bridge_transmit(struct ifnet *, struct mbuf *);
332 #ifdef ALTQ
333 static void bridge_altq_start(if_t);
334 static int bridge_altq_transmit(if_t, struct mbuf *);
335 #endif
336 static void bridge_qflush(struct ifnet *);
337 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
338 static void bridge_inject(struct ifnet *, struct mbuf *);
339 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
340 struct rtentry *);
341 static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
342 struct mbuf *, struct bridge_iflist *);
343 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
344
345 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
346 struct mbuf *m);
347 static bool bridge_member_ifaddrs(void);
348 static void bridge_timer(void *);
349
350 static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
351 struct mbuf *, int);
352 static void bridge_span(struct bridge_softc *, struct mbuf *);
353
354 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
355 ether_vlanid_t, struct bridge_iflist *, int, uint8_t);
356 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
357 ether_vlanid_t);
358 static void bridge_rttrim(struct bridge_softc *);
359 static void bridge_rtage(struct bridge_softc *);
360 static void bridge_rtflush(struct bridge_softc *, int);
361 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
362 ether_vlanid_t);
363 static bool bridge_vfilter_in(const struct bridge_iflist *, struct mbuf *);
364 static bool bridge_vfilter_out(const struct bridge_iflist *,
365 const struct mbuf *);
366
367 static void bridge_rtable_init(struct bridge_softc *);
368 static void bridge_rtable_fini(struct bridge_softc *);
369
370 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
371 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
372 const uint8_t *, ether_vlanid_t);
373 static int bridge_rtnode_insert(struct bridge_softc *,
374 struct bridge_rtnode *);
375 static void bridge_rtnode_destroy(struct bridge_softc *,
376 struct bridge_rtnode *);
377 static void bridge_rtable_expire(struct ifnet *, int);
378 static void bridge_state_change(struct ifnet *, int);
379
380 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
381 const char *name);
382 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
383 struct ifnet *ifp);
384 static void bridge_delete_member(struct bridge_softc *,
385 struct bridge_iflist *, int);
386 static void bridge_delete_span(struct bridge_softc *,
387 struct bridge_iflist *);
388
389 static int bridge_ioctl_add(struct bridge_softc *, void *);
390 static int bridge_ioctl_del(struct bridge_softc *, void *);
391 static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
392 static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
393 static int bridge_ioctl_scache(struct bridge_softc *, void *);
394 static int bridge_ioctl_gcache(struct bridge_softc *, void *);
395 static int bridge_ioctl_gifs(struct bridge_softc *, void *);
396 static int bridge_ioctl_rts(struct bridge_softc *, void *);
397 static int bridge_ioctl_saddr(struct bridge_softc *, void *);
398 static int bridge_ioctl_sto(struct bridge_softc *, void *);
399 static int bridge_ioctl_gto(struct bridge_softc *, void *);
400 static int bridge_ioctl_daddr(struct bridge_softc *, void *);
401 static int bridge_ioctl_flush(struct bridge_softc *, void *);
402 static int bridge_ioctl_gpri(struct bridge_softc *, void *);
403 static int bridge_ioctl_spri(struct bridge_softc *, void *);
404 static int bridge_ioctl_ght(struct bridge_softc *, void *);
405 static int bridge_ioctl_sht(struct bridge_softc *, void *);
406 static int bridge_ioctl_gfd(struct bridge_softc *, void *);
407 static int bridge_ioctl_sfd(struct bridge_softc *, void *);
408 static int bridge_ioctl_gma(struct bridge_softc *, void *);
409 static int bridge_ioctl_sma(struct bridge_softc *, void *);
410 static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
411 static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
412 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
413 static int bridge_ioctl_sifpvid(struct bridge_softc *, void *);
414 static int bridge_ioctl_sifvlanset(struct bridge_softc *, void *);
415 static int bridge_ioctl_gifvlanset(struct bridge_softc *, void *);
416 static int bridge_ioctl_addspan(struct bridge_softc *, void *);
417 static int bridge_ioctl_delspan(struct bridge_softc *, void *);
418 static int bridge_ioctl_gbparam(struct bridge_softc *, void *);
419 static int bridge_ioctl_grte(struct bridge_softc *, void *);
420 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *);
421 static int bridge_ioctl_sproto(struct bridge_softc *, void *);
422 static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
423 static int bridge_ioctl_gflags(struct bridge_softc *, void *);
424 static int bridge_ioctl_sflags(struct bridge_softc *, void *);
425 static int bridge_ioctl_gdefpvid(struct bridge_softc *, void *);
426 static int bridge_ioctl_sdefpvid(struct bridge_softc *, void *);
427 static int bridge_ioctl_svlanproto(struct bridge_softc *, void *);
428 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
429 int);
430 #ifdef INET
431 static int bridge_ip_checkbasic(struct mbuf **mp);
432 static int bridge_fragment(struct ifnet *, struct mbuf **mp,
433 struct ether_header *, int, struct llc *);
434 #endif /* INET */
435 #ifdef INET6
436 static int bridge_ip6_checkbasic(struct mbuf **mp);
437 #endif /* INET6 */
438 static void bridge_linkstate(struct ifnet *ifp);
439 static void bridge_linkcheck(struct bridge_softc *sc);
440
441 /*
442 * Use the "null" value from IEEE 802.1Q-2014 Table 9-2
443 * to indicate untagged frames.
444 */
445 #define VLANTAGOF(_m) \
446 ((_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : DOT1Q_VID_NULL)
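
/*
 * For example, a frame with M_VLANTAG set and ether_vtag 0x2064 (PCP 1,
 * VID 100) yields VLANTAGOF() == 100, while an untagged frame yields
 * DOT1Q_VID_NULL (0).
 */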
447
448 static struct bstp_cb_ops bridge_ops = {
449 .bcb_state = bridge_state_change,
450 .bcb_rtage = bridge_rtable_expire
451 };
452
453 SYSCTL_DECL(_net_link);
454 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
455 "Bridge");
456
457 /* only pass IP[46] packets when pfil is enabled */
458 VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
459 #define V_pfil_onlyip VNET(pfil_onlyip)
460 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
461 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
462 "Only pass IP packets when pfil is enabled");
463
464 /* run pfil hooks on the bridge interface */
465 VNET_DEFINE_STATIC(int, pfil_bridge) = 0;
466 #define V_pfil_bridge VNET(pfil_bridge)
467 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
468 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
469 "Packet filter on the bridge interface");
470
471 /* layer2 filter with ipfw */
472 VNET_DEFINE_STATIC(int, pfil_ipfw);
473 #define V_pfil_ipfw VNET(pfil_ipfw)
474
475 /* layer2 ARP filter with ipfw */
476 VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
477 #define V_pfil_ipfw_arp VNET(pfil_ipfw_arp)
478 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
479 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
480 "Filter ARP packets through IPFW layer2");
481
482 /* run pfil hooks on the member interface */
483 VNET_DEFINE_STATIC(int, pfil_member) = 0;
484 #define V_pfil_member VNET(pfil_member)
485 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
486 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
487 "Packet filter on the member interface");
488
489 /* run pfil hooks on the physical interface for locally destined packets */
490 VNET_DEFINE_STATIC(int, pfil_local_phys);
491 #define V_pfil_local_phys VNET(pfil_local_phys)
492 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
493 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
494 "Packet filter on the physical interface for locally destined packets");
495
496 /* log STP state changes */
497 VNET_DEFINE_STATIC(int, log_stp);
498 #define V_log_stp VNET(log_stp)
499 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
500 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
501 "Log STP state changes");
502
503 /* share MAC with first bridge member */
504 VNET_DEFINE_STATIC(int, bridge_inherit_mac);
505 #define V_bridge_inherit_mac VNET(bridge_inherit_mac)
506 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
507 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
508 "Inherit MAC address from the first bridge member");
509
510 VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
511 #define V_allow_llz_overlap VNET(allow_llz_overlap)
512 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
513 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
514 "Allow overlap of link-local scope "
515 "zones of a bridge interface and the member interfaces");
516
517 /* log MAC address port flapping */
518 VNET_DEFINE_STATIC(bool, log_mac_flap) = true;
519 #define V_log_mac_flap VNET(log_mac_flap)
520 SYSCTL_BOOL(_net_link_bridge, OID_AUTO, log_mac_flap,
521 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(log_mac_flap), true,
522 "Log MAC address port flapping");
523
524 /* allow IP addresses on bridge members */
525 VNET_DEFINE_STATIC(bool, member_ifaddrs) = true;
526 #define V_member_ifaddrs VNET(member_ifaddrs)
527 SYSCTL_BOOL(_net_link_bridge, OID_AUTO, member_ifaddrs,
528 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(member_ifaddrs), false,
529 "Allow layer 3 addresses on bridge members (deprecated)");
530
531 static bool
bridge_member_ifaddrs(void)
533 {
534 return (V_member_ifaddrs);
535 }
536
537 VNET_DEFINE_STATIC(int, log_interval) = 5;
538 VNET_DEFINE_STATIC(int, log_count) = 0;
539 VNET_DEFINE_STATIC(struct timeval, log_last) = { 0 };
540
541 #define V_log_interval VNET(log_interval)
542 #define V_log_count VNET(log_count)
543 #define V_log_last VNET(log_last)
544
545 struct bridge_control {
546 int (*bc_func)(struct bridge_softc *, void *);
547 int bc_argsize;
548 int bc_flags;
549 };
550
551 #define BC_F_COPYIN 0x01 /* copy arguments in */
552 #define BC_F_COPYOUT 0x02 /* copy arguments out */
553 #define BC_F_SUSER 0x04 /* do super-user check */
554
555 static const struct bridge_control bridge_control_table[] = {
556 { bridge_ioctl_add, sizeof(struct ifbreq),
557 BC_F_COPYIN|BC_F_SUSER },
558 { bridge_ioctl_del, sizeof(struct ifbreq),
559 BC_F_COPYIN|BC_F_SUSER },
560
561 { bridge_ioctl_gifflags, sizeof(struct ifbreq),
562 BC_F_COPYIN|BC_F_COPYOUT },
563 { bridge_ioctl_sifflags, sizeof(struct ifbreq),
564 BC_F_COPYIN|BC_F_SUSER },
565
566 { bridge_ioctl_scache, sizeof(struct ifbrparam),
567 BC_F_COPYIN|BC_F_SUSER },
568 { bridge_ioctl_gcache, sizeof(struct ifbrparam),
569 BC_F_COPYOUT },
570
571 { bridge_ioctl_gifs, sizeof(struct ifbifconf),
572 BC_F_COPYIN|BC_F_COPYOUT },
573 { bridge_ioctl_rts, sizeof(struct ifbaconf),
574 BC_F_COPYIN|BC_F_COPYOUT },
575
576 { bridge_ioctl_saddr, sizeof(struct ifbareq),
577 BC_F_COPYIN|BC_F_SUSER },
578
579 { bridge_ioctl_sto, sizeof(struct ifbrparam),
580 BC_F_COPYIN|BC_F_SUSER },
581 { bridge_ioctl_gto, sizeof(struct ifbrparam),
582 BC_F_COPYOUT },
583
584 { bridge_ioctl_daddr, sizeof(struct ifbareq),
585 BC_F_COPYIN|BC_F_SUSER },
586
587 { bridge_ioctl_flush, sizeof(struct ifbreq),
588 BC_F_COPYIN|BC_F_SUSER },
589
590 { bridge_ioctl_gpri, sizeof(struct ifbrparam),
591 BC_F_COPYOUT },
592 { bridge_ioctl_spri, sizeof(struct ifbrparam),
593 BC_F_COPYIN|BC_F_SUSER },
594
595 { bridge_ioctl_ght, sizeof(struct ifbrparam),
596 BC_F_COPYOUT },
597 { bridge_ioctl_sht, sizeof(struct ifbrparam),
598 BC_F_COPYIN|BC_F_SUSER },
599
600 { bridge_ioctl_gfd, sizeof(struct ifbrparam),
601 BC_F_COPYOUT },
602 { bridge_ioctl_sfd, sizeof(struct ifbrparam),
603 BC_F_COPYIN|BC_F_SUSER },
604
605 { bridge_ioctl_gma, sizeof(struct ifbrparam),
606 BC_F_COPYOUT },
607 { bridge_ioctl_sma, sizeof(struct ifbrparam),
608 BC_F_COPYIN|BC_F_SUSER },
609
610 { bridge_ioctl_sifprio, sizeof(struct ifbreq),
611 BC_F_COPYIN|BC_F_SUSER },
612
613 { bridge_ioctl_sifcost, sizeof(struct ifbreq),
614 BC_F_COPYIN|BC_F_SUSER },
615
616 { bridge_ioctl_addspan, sizeof(struct ifbreq),
617 BC_F_COPYIN|BC_F_SUSER },
618 { bridge_ioctl_delspan, sizeof(struct ifbreq),
619 BC_F_COPYIN|BC_F_SUSER },
620
621 { bridge_ioctl_gbparam, sizeof(struct ifbropreq),
622 BC_F_COPYOUT },
623
624 { bridge_ioctl_grte, sizeof(struct ifbrparam),
625 BC_F_COPYOUT },
626
627 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf),
628 BC_F_COPYIN|BC_F_COPYOUT },
629
630 { bridge_ioctl_sproto, sizeof(struct ifbrparam),
631 BC_F_COPYIN|BC_F_SUSER },
632
633 { bridge_ioctl_stxhc, sizeof(struct ifbrparam),
634 BC_F_COPYIN|BC_F_SUSER },
635
636 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
637 BC_F_COPYIN|BC_F_SUSER },
638
639 { bridge_ioctl_sifpvid, sizeof(struct ifbreq),
640 BC_F_COPYIN|BC_F_SUSER },
641
642 { bridge_ioctl_sifvlanset, sizeof(struct ifbif_vlan_req),
643 BC_F_COPYIN|BC_F_SUSER },
644
645 { bridge_ioctl_gifvlanset, sizeof(struct ifbif_vlan_req),
646 BC_F_COPYIN|BC_F_COPYOUT },
647
648 { bridge_ioctl_gflags, sizeof(struct ifbrparam),
649 BC_F_COPYOUT },
650
651 { bridge_ioctl_sflags, sizeof(struct ifbrparam),
652 BC_F_COPYIN|BC_F_SUSER },
653
654 { bridge_ioctl_gdefpvid, sizeof(struct ifbrparam),
655 BC_F_COPYOUT },
656
657 { bridge_ioctl_sdefpvid, sizeof(struct ifbrparam),
658 BC_F_COPYIN|BC_F_SUSER },
659
660 { bridge_ioctl_svlanproto, sizeof(struct ifbreq),
661 BC_F_COPYIN|BC_F_SUSER },
662 };
663 static const int bridge_control_table_size = nitems(bridge_control_table);
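
/*
 * This table is indexed by the ifd_cmd field of a SIOCSDRVSPEC/SIOCGDRVSPEC
 * request; the BRDG* command values in net/if_bridgevar.h are defined to
 * match this order. A hedged sketch of how userland (e.g. ifconfig) drives
 * it, with illustrative variable names and "s" assumed to be an open
 * socket descriptor:
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGADD;		index 0: bridge_ioctl_add
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 */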
664
665 VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list) =
666 LIST_HEAD_INITIALIZER();
667 #define V_bridge_list VNET(bridge_list)
668 #define BRIDGE_LIST_LOCK_INIT(x) sx_init(&V_bridge_list_sx, \
669 "if_bridge list")
670 #define BRIDGE_LIST_LOCK_DESTROY(x) sx_destroy(&V_bridge_list_sx)
671 #define BRIDGE_LIST_LOCK(x) sx_xlock(&V_bridge_list_sx)
672 #define BRIDGE_LIST_UNLOCK(x) sx_xunlock(&V_bridge_list_sx)
673
674 VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
675 #define V_bridge_cloner VNET(bridge_cloner)
676
677 static const char bridge_name[] = "bridge";
678
679 static void
vnet_bridge_init(const void *unused __unused)
681 {
682
683 V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
684 sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
685 UMA_ALIGN_PTR, 0);
686 BRIDGE_LIST_LOCK_INIT();
687
688 struct if_clone_addreq req = {
689 .create_f = bridge_clone_create,
690 .destroy_f = bridge_clone_destroy,
691 .flags = IFC_F_AUTOUNIT,
692 };
693 V_bridge_cloner = ifc_attach_cloner(bridge_name, &req);
694 }
695 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
696 vnet_bridge_init, NULL);
697
698 static void
vnet_bridge_uninit(const void *unused __unused)
700 {
701
702 ifc_detach_cloner(V_bridge_cloner);
703 V_bridge_cloner = NULL;
704 BRIDGE_LIST_LOCK_DESTROY();
705
706 /* Callbacks may use the UMA zone. */
707 NET_EPOCH_DRAIN_CALLBACKS();
708
709 uma_zdestroy(V_bridge_rtnode_zone);
710 }
711 VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
712 vnet_bridge_uninit, NULL);
713
714 static int
bridge_modevent(module_t mod, int type, void *data)
716 {
717
718 switch (type) {
719 case MOD_LOAD:
720 bridge_dn_p = bridge_dummynet;
721 bridge_same_p = bridge_same;
722 bridge_get_softc_p = bridge_get_softc;
723 bridge_member_ifaddrs_p = bridge_member_ifaddrs;
724 bridge_detach_cookie = EVENTHANDLER_REGISTER(
725 ifnet_departure_event, bridge_ifdetach, NULL,
726 EVENTHANDLER_PRI_ANY);
727 break;
728 case MOD_UNLOAD:
729 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
730 bridge_detach_cookie);
731 bridge_dn_p = NULL;
732 bridge_same_p = NULL;
733 bridge_get_softc_p = NULL;
734 bridge_member_ifaddrs_p = NULL;
735 break;
736 default:
737 return (EOPNOTSUPP);
738 }
739 return (0);
740 }
741
742 static moduledata_t bridge_mod = {
743 "if_bridge",
744 bridge_modevent,
745 0
746 };
747
748 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
749 MODULE_VERSION(if_bridge, 1);
750 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
751
752 /*
753 * handler for net.link.bridge.ipfw
754 */
755 static int
sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
757 {
758 int enable = V_pfil_ipfw;
759 int error;
760
761 error = sysctl_handle_int(oidp, &enable, 0, req);
762 enable &= 1;
763
764 if (enable != V_pfil_ipfw) {
765 V_pfil_ipfw = enable;
766
767 /*
 * Disable pfil so that ipfw doesn't run twice; if the user
769 * really wants both then they can re-enable pfil_bridge and/or
770 * pfil_member. Also allow non-ip packets as ipfw can filter by
771 * layer2 type.
772 */
773 if (V_pfil_ipfw) {
774 V_pfil_onlyip = 0;
775 V_pfil_bridge = 0;
776 V_pfil_member = 0;
777 }
778 }
779
780 return (error);
781 }
782 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
783 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
784 &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
785 "Layer2 filter with IPFW");
786
787 #ifdef VIMAGE
788 static void
bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
790 {
791 struct bridge_softc *sc = ifp->if_softc;
792 struct bridge_iflist *bif;
793
794 BRIDGE_LOCK(sc);
795
796 while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
797 bridge_delete_member(sc, bif, 0);
798
799 while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
800 bridge_delete_span(sc, bif);
801 }
802
803 BRIDGE_UNLOCK(sc);
804 }
805 #endif
806
807 /*
808 * bridge_get_softc:
809 *
810 * Return the bridge softc for an ifnet.
811 */
812 static void *
bridge_get_softc(struct ifnet *ifp)
814 {
815 struct bridge_iflist *bif;
816
817 NET_EPOCH_ASSERT();
818
819 bif = ifp->if_bridge;
820 if (bif == NULL)
821 return (NULL);
822 return (bif->bif_sc);
823 }
824
825 /*
826 * bridge_same:
827 *
828 * Return true if two interfaces are in the same bridge. This is only used by
829 * bridgestp via bridge_same_p.
830 */
831 static bool
bridge_same(const void *bifap, const void *bifbp)
833 {
834 const struct bridge_iflist *bifa = bifap, *bifb = bifbp;
835
836 NET_EPOCH_ASSERT();
837
838 if (bifa == NULL || bifb == NULL)
839 return (false);
840
841 return (bifa->bif_sc == bifb->bif_sc);
842 }
843
844 /*
845 * bridge_clone_create:
846 *
847 * Create a new bridge instance.
848 */
849 static int
bridge_clone_create(struct if_clone *ifc, char *name, size_t len,
851 struct ifc_data *ifd, struct ifnet **ifpp)
852 {
853 struct bridge_softc *sc;
854 struct ifnet *ifp;
855
856 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
857 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
858
859 BRIDGE_LOCK_INIT(sc);
860 sc->sc_brtmax = BRIDGE_RTABLE_MAX;
861 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
862
863 /* Initialize our routing table. */
864 bridge_rtable_init(sc);
865
866 callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
867
868 CK_LIST_INIT(&sc->sc_iflist);
869 CK_LIST_INIT(&sc->sc_spanlist);
870
871 ifp->if_softc = sc;
872 if_initname(ifp, bridge_name, ifd->unit);
873 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
874 ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_HWTAGGING;
875 ifp->if_ioctl = bridge_ioctl;
876 #ifdef ALTQ
877 ifp->if_start = bridge_altq_start;
878 ifp->if_transmit = bridge_altq_transmit;
879 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
880 ifp->if_snd.ifq_drv_maxlen = 0;
881 IFQ_SET_READY(&ifp->if_snd);
882 #else
883 ifp->if_transmit = bridge_transmit;
884 #endif
885 ifp->if_qflush = bridge_qflush;
886 ifp->if_init = bridge_init;
887 ifp->if_type = IFT_BRIDGE;
888
889 ether_gen_addr(ifp, &sc->sc_defaddr);
890
891 bstp_attach(&sc->sc_stp, &bridge_ops);
892 ether_ifattach(ifp, sc->sc_defaddr.octet);
893 /* Now undo some of the damage... */
894 ifp->if_baudrate = 0;
895 #ifdef VIMAGE
896 ifp->if_reassign = bridge_reassign;
897 #endif
898 sc->sc_if_input = ifp->if_input; /* ether_input */
899 ifp->if_input = bridge_inject;
900
901 /*
902 * Allow BRIDGE_INPUT() to pass in packets originating from the bridge
903 * itself via bridge_inject(). This is required for netmap but
904 * otherwise has no effect.
905 */
906 ifp->if_bridge_input = bridge_input;
907
908 BRIDGE_LIST_LOCK();
909 LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
910 BRIDGE_LIST_UNLOCK();
911 *ifpp = ifp;
912
913 return (0);
914 }
915
916 static void
bridge_clone_destroy_cb(struct epoch_context *ctx)
918 {
919 struct bridge_softc *sc;
920
921 sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
922
923 BRIDGE_LOCK_DESTROY(sc);
924 free(sc, M_DEVBUF);
925 }
926
927 /*
928 * bridge_clone_destroy:
929 *
930 * Destroy a bridge instance.
931 */
932 static int
bridge_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
934 {
935 struct bridge_softc *sc = ifp->if_softc;
936 struct bridge_iflist *bif;
937 struct epoch_tracker et;
938
939 BRIDGE_LOCK(sc);
940
941 bridge_stop(ifp, 1);
942 ifp->if_flags &= ~IFF_UP;
943
944 while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
945 bridge_delete_member(sc, bif, 0);
946
947 while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
948 bridge_delete_span(sc, bif);
949 }
950
951 /* Tear down the routing table. */
952 bridge_rtable_fini(sc);
953
954 BRIDGE_UNLOCK(sc);
955
956 NET_EPOCH_ENTER(et);
957
958 callout_drain(&sc->sc_brcallout);
959
960 BRIDGE_LIST_LOCK();
961 LIST_REMOVE(sc, sc_list);
962 BRIDGE_LIST_UNLOCK();
963
964 bstp_detach(&sc->sc_stp);
965 #ifdef ALTQ
966 IFQ_PURGE(&ifp->if_snd);
967 #endif
968 NET_EPOCH_EXIT(et);
969
970 ether_ifdetach(ifp);
971 if_free(ifp);
972
973 NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
974
975 return (0);
976 }
977
978 /*
979 * bridge_ioctl:
980 *
981 * Handle a control request from the operator.
982 */
983 static int
bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
985 {
986 struct bridge_softc *sc = ifp->if_softc;
987 struct ifreq *ifr = (struct ifreq *)data;
988 struct bridge_iflist *bif;
989 struct thread *td = curthread;
990 union {
991 struct ifbreq ifbreq;
992 struct ifbifconf ifbifconf;
993 struct ifbareq ifbareq;
994 struct ifbaconf ifbaconf;
995 struct ifbrparam ifbrparam;
996 struct ifbropreq ifbropreq;
997 struct ifbif_vlan_req ifvlanreq;
998 } args;
999 struct ifdrv *ifd = (struct ifdrv *) data;
1000 const struct bridge_control *bc;
1001 int error = 0, oldmtu;
1002
1003 BRIDGE_LOCK(sc);
1004
1005 switch (cmd) {
1006 case SIOCADDMULTI:
1007 case SIOCDELMULTI:
1008 break;
1009
1010 case SIOCGDRVSPEC:
1011 case SIOCSDRVSPEC:
1012 if (ifd->ifd_cmd >= bridge_control_table_size) {
1013 error = EXTERROR(EINVAL, "Invalid control command");
1014 break;
1015 }
1016 bc = &bridge_control_table[ifd->ifd_cmd];
1017
1018 if (cmd == SIOCGDRVSPEC &&
1019 (bc->bc_flags & BC_F_COPYOUT) == 0) {
1020 error = EXTERROR(EINVAL,
1021 "Inappropriate ioctl for command "
1022 "(expected SIOCSDRVSPEC)");
1023 break;
1024 }
1025 else if (cmd == SIOCSDRVSPEC &&
1026 (bc->bc_flags & BC_F_COPYOUT) != 0) {
1027 error = EXTERROR(EINVAL,
1028 "Inappropriate ioctl for command "
1029 "(expected SIOCGDRVSPEC)");
1030 break;
1031 }
1032
1033 if (bc->bc_flags & BC_F_SUSER) {
1034 error = priv_check(td, PRIV_NET_BRIDGE);
1035 if (error) {
1036 EXTERROR(error, "PRIV_NET_BRIDGE required");
1037 break;
1038 }
1039 }
1040
1041 if (ifd->ifd_len != bc->bc_argsize ||
1042 ifd->ifd_len > sizeof(args)) {
1043 error = EXTERROR(EINVAL, "Invalid argument size");
1044 break;
1045 }
1046
1047 bzero(&args, sizeof(args));
1048 if (bc->bc_flags & BC_F_COPYIN) {
1049 error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
1050 if (error)
1051 break;
1052 }
1053
1054 oldmtu = ifp->if_mtu;
1055 error = (*bc->bc_func)(sc, &args);
1056 if (error)
1057 break;
1058
1059 /*
1060 * Bridge MTU may change during addition of the first port.
 * If it did, notify the network layers of the new MTU.
1062 */
1063 if (ifp->if_mtu != oldmtu)
1064 if_notifymtu(ifp);
1065
1066 if (bc->bc_flags & BC_F_COPYOUT)
1067 error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
1068
1069 break;
1070
1071 case SIOCSIFFLAGS:
1072 if (!(ifp->if_flags & IFF_UP) &&
1073 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1074 /*
1075 * If interface is marked down and it is running,
1076 * then stop and disable it.
1077 */
1078 bridge_stop(ifp, 1);
1079 } else if ((ifp->if_flags & IFF_UP) &&
1080 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1081 /*
1082 * If interface is marked up and it is stopped, then
1083 * start it.
1084 */
1085 BRIDGE_UNLOCK(sc);
1086 (*ifp->if_init)(sc);
1087 BRIDGE_LOCK(sc);
1088 }
1089 break;
1090
1091 case SIOCSIFMTU:
1092 oldmtu = sc->sc_ifp->if_mtu;
1093
1094 if (ifr->ifr_mtu < IF_MINMTU) {
1095 error = EXTERROR(EINVAL,
1096 "Requested MTU is lower than IF_MINMTU");
1097 break;
1098 }
1099 if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1100 sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1101 break;
1102 }
1103 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1104 error = (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1105 SIOCSIFMTU, (caddr_t)ifr);
1106 if (error != 0) {
1107 log(LOG_NOTICE, "%s: invalid MTU: %u for"
1108 " member %s\n", sc->sc_ifp->if_xname,
1109 ifr->ifr_mtu,
1110 bif->bif_ifp->if_xname);
1111 error = EINVAL;
1112 break;
1113 }
1114 }
1115 if (error) {
1116 /* Restore the previous MTU on all member interfaces. */
1117 ifr->ifr_mtu = oldmtu;
1118 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1119 (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1120 SIOCSIFMTU, (caddr_t)ifr);
1121 }
1122 EXTERROR(error,
1123 "Failed to set MTU on member interface");
1124 } else {
1125 sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1126 }
1127 break;
1128 default:
1129 /*
1130 * drop the lock as ether_ioctl() will call bridge_start() and
1131 * cause the lock to be recursed.
1132 */
1133 BRIDGE_UNLOCK(sc);
1134 error = ether_ioctl(ifp, cmd, data);
1135 BRIDGE_LOCK(sc);
1136 break;
1137 }
1138
1139 BRIDGE_UNLOCK(sc);
1140
1141 return (error);
1142 }
1143
1144 /*
1145 * bridge_mutecaps:
1146 *
1147 * Clear or restore unwanted capabilities on the member interface
1148 */
1149 static void
bridge_mutecaps(struct bridge_softc *sc)
1151 {
1152 struct bridge_iflist *bif;
1153 int enabled, mask;
1154
1155 BRIDGE_LOCK_ASSERT(sc);
1156
1157 /* Initial bitmask of capabilities to test */
1158 mask = BRIDGE_IFCAPS_MASK;
1159
1160 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1161 /* Every member must support it or it's disabled */
1162 mask &= bif->bif_savedcaps;
1163 }
1164
1165 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1166 enabled = bif->bif_ifp->if_capenable;
1167 enabled &= ~BRIDGE_IFCAPS_STRIP;
1168 /* Strip off mask bits and enable them again if allowed */
1169 enabled &= ~BRIDGE_IFCAPS_MASK;
1170 enabled |= mask;
1171 bridge_set_ifcap(sc, bif, enabled);
1172 }
1173 }
1174
1175 static void
bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1177 {
1178 struct ifnet *ifp = bif->bif_ifp;
1179 struct ifreq ifr;
1180 int error, mask, stuck;
1181
1182 bzero(&ifr, sizeof(ifr));
1183 ifr.ifr_reqcap = set;
1184
1185 if (ifp->if_capenable != set) {
1186 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1187 if (error)
1188 if_printf(sc->sc_ifp,
1189 "error setting capabilities on %s: %d\n",
1190 ifp->if_xname, error);
1191 mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
1192 stuck = ifp->if_capenable & mask & ~set;
1193 if (stuck != 0)
1194 if_printf(sc->sc_ifp,
1195 "can't disable some capabilities on %s: 0x%x\n",
1196 ifp->if_xname, stuck);
1197 }
1198 }
1199
1200 /*
1201 * bridge_lookup_member:
1202 *
1203 * Lookup a bridge member interface.
1204 */
1205 static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
1207 {
1208 struct bridge_iflist *bif;
1209 struct ifnet *ifp;
1210
1211 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1212
1213 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1214 ifp = bif->bif_ifp;
1215 if (strcmp(ifp->if_xname, name) == 0)
1216 return (bif);
1217 }
1218
1219 return (NULL);
1220 }
1221
1222 /*
1223 * bridge_lookup_member_if:
1224 *
1225 * Lookup a bridge member interface by ifnet*.
1226 */
1227 static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1229 {
1230 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1231 return (member_ifp->if_bridge);
1232 }
1233
1234 static void
bridge_delete_member_cb(struct epoch_context *ctx)
1236 {
1237 struct bridge_iflist *bif;
1238
1239 bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1240
1241 free(bif, M_DEVBUF);
1242 }
1243
1244 /*
1245 * bridge_delete_member:
1246 *
1247 * Delete the specified member interface.
1248 */
1249 static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1251 int gone)
1252 {
1253 struct ifnet *ifs = bif->bif_ifp;
1254 struct ifnet *fif = NULL;
1255 struct bridge_iflist *bifl;
1256
1257 BRIDGE_LOCK_ASSERT(sc);
1258
1259 if (bif->bif_flags & IFBIF_STP)
1260 bstp_disable(&bif->bif_stp);
1261
1262 ifs->if_bridge = NULL;
1263 CK_LIST_REMOVE(bif, bif_next);
1264
1265 /*
1266 * If removing the interface that gave the bridge its mac address, set
1267 * the mac address of the bridge to the address of the next member, or
1268 * to its default address if no members are left.
1269 */
1270 if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1271 if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1272 bcopy(&sc->sc_defaddr,
1273 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1274 sc->sc_ifaddr = NULL;
1275 } else {
1276 bifl = CK_LIST_FIRST(&sc->sc_iflist);
1277 fif = bifl->bif_ifp;
1278 bcopy(IF_LLADDR(fif),
1279 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1280 sc->sc_ifaddr = fif;
1281 }
1282 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1283 }
1284
1285 bridge_linkcheck(sc);
bridge_mutecaps(sc); /* recalculate now that this interface is removed */
1287 BRIDGE_RT_LOCK(sc);
1288 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1289 BRIDGE_RT_UNLOCK(sc);
1290 KASSERT(bif->bif_addrcnt == 0,
1291 ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1292
1293 ifs->if_bridge_output = NULL;
1294 ifs->if_bridge_input = NULL;
1295 ifs->if_bridge_linkstate = NULL;
1296 if (!gone) {
1297 switch (ifs->if_type) {
1298 case IFT_ETHER:
1299 case IFT_L2VLAN:
1300 /*
1301 * Take the interface out of promiscuous mode, but only
1302 * if it was promiscuous in the first place. It might
1303 * not be if we're in the bridge_ioctl_add() error path.
1304 */
1305 if (ifs->if_flags & IFF_PROMISC)
1306 (void) ifpromisc(ifs, 0);
1307 break;
1308
1309 case IFT_GIF:
1310 break;
1311
1312 default:
1313 #ifdef DIAGNOSTIC
1314 panic("bridge_delete_member: impossible");
1315 #endif
1316 break;
1317 }
1318 /* Re-enable any interface capabilities */
1319 bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1320 }
1321 bstp_destroy(&bif->bif_stp); /* prepare to free */
1322
1323 NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1324 }
1325
1326 /*
1327 * bridge_delete_span:
1328 *
1329 * Delete the specified span interface.
1330 */
1331 static void
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1333 {
1334 BRIDGE_LOCK_ASSERT(sc);
1335
1336 KASSERT(bif->bif_ifp->if_bridge == NULL,
1337 ("%s: not a span interface", __func__));
1338
1339 CK_LIST_REMOVE(bif, bif_next);
1340
1341 NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1342 }
1343
1344 static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1346 {
1347 struct ifbreq *req = arg;
1348 struct bridge_iflist *bif = NULL;
1349 struct ifnet *ifs;
1350 int error = 0;
1351
1352 ifs = ifunit(req->ifbr_ifsname);
1353 if (ifs == NULL)
1354 return (EXTERROR(ENOENT, "No such interface",
1355 req->ifbr_ifsname));
1356 if (ifs->if_ioctl == NULL) /* must be supported */
1357 return (EXTERROR(EINVAL, "Interface must support ioctl(2)"));
1358
1359 /*
1360 * If the new interface is a vlan(4), it could be a bridge SVI.
1361 * Don't allow such things to be added to bridges.
1362 */
1363 if (ifs->if_type == IFT_L2VLAN) {
1364 struct ifnet *parent;
1365 struct epoch_tracker et;
1366 bool is_bridge;
1367
1368 /*
1369 * Entering NET_EPOCH with BRIDGE_LOCK held, but this is okay
1370 * since we don't sleep here.
1371 */
1372 NET_EPOCH_ENTER(et);
1373 parent = VLAN_TRUNKDEV(ifs);
1374 is_bridge = (parent != NULL && parent->if_type == IFT_BRIDGE);
1375 NET_EPOCH_EXIT(et);
1376
1377 if (is_bridge)
1378 return (EXTERROR(EINVAL,
1379 "Bridge SVI cannot be added to a bridge"));
1380 }
1381
1382 /* If it's in the span list, it can't be a member. */
1383 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1384 if (ifs == bif->bif_ifp)
1385 return (EXTERROR(EBUSY,
1386 "Span interface cannot be a member"));
1387
1388 if (ifs->if_bridge) {
1389 struct bridge_iflist *sbif = ifs->if_bridge;
1390 if (sbif->bif_sc == sc)
1391 return (EXTERROR(EEXIST,
1392 "Interface is already a member of this bridge"));
1393
1394 return (EXTERROR(EBUSY,
1395 "Interface is already a member of another bridge"));
1396 }
1397
1398 switch (ifs->if_type) {
1399 case IFT_ETHER:
1400 case IFT_L2VLAN:
1401 case IFT_GIF:
1402 /* permitted interface types */
1403 break;
1404 default:
1405 return (EXTERROR(EINVAL, "Unsupported interface type"));
1406 }
1407
1408 #ifdef INET6
1409 /*
1410 * Two valid inet6 addresses with link-local scope must not be
1411 * on the parent interface and the member interfaces at the
1412 * same time. This restriction is needed to prevent violation
1413 * of link-local scope zone. Attempts to add a member
1414 * interface which has inet6 addresses when the parent has
1415 * inet6 triggers removal of all inet6 addresses on the member
1416 * interface.
1417 */
1418
1419 /* Check if the parent interface has a link-local scope addr. */
1420 if (V_allow_llz_overlap == 0 &&
1421 in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1422 /*
1423 * If any, remove all inet6 addresses from the member
1424 * interfaces.
1425 */
1426 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1427 if (in6ifa_llaonifp(bif->bif_ifp)) {
1428 in6_ifdetach(bif->bif_ifp);
1429 if_printf(sc->sc_ifp,
1430 "IPv6 addresses on %s have been removed "
1431 "before adding it as a member to prevent "
1432 "IPv6 address scope violation.\n",
1433 bif->bif_ifp->if_xname);
1434 }
1435 }
1436 if (in6ifa_llaonifp(ifs)) {
1437 in6_ifdetach(ifs);
1438 if_printf(sc->sc_ifp,
1439 "IPv6 addresses on %s have been removed "
1440 "before adding it as a member to prevent "
1441 "IPv6 address scope violation.\n",
1442 ifs->if_xname);
1443 }
1444 }
1445 #endif
1446
1447 /*
1448 * If member_ifaddrs is disabled, do not allow an interface with
1449 * assigned IP addresses to be added to a bridge. Skip this check
1450 * for gif interfaces, because the IP address assigned to a gif
1451 * interface is separate from the bridge's Ethernet segment.
1452 */
1453 if (ifs->if_type != IFT_GIF) {
1454 struct ifaddr *ifa;
1455
1456 CK_STAILQ_FOREACH(ifa, &ifs->if_addrhead, ifa_link) {
1457 if (ifa->ifa_addr->sa_family != AF_INET &&
1458 ifa->ifa_addr->sa_family != AF_INET6)
1459 continue;
1460
1461 if (V_member_ifaddrs) {
1462 if_printf(sc->sc_ifp,
1463 "WARNING: Adding member interface %s which "
1464 "has an IP address assigned is deprecated "
1465 "and will be unsupported in a future "
1466 "release.\n", ifs->if_xname);
1467 break;
1468 } else {
1469 return (EXTERROR(EINVAL,
1470 "Member interface may not have "
1471 "an IP address assigned"));
1472 }
1473 }
1474 }
1475
1476 /* Allow the first Ethernet member to define the MTU */
1477 if (CK_LIST_EMPTY(&sc->sc_iflist))
1478 sc->sc_ifp->if_mtu = ifs->if_mtu;
1479 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1480 struct ifreq ifr;
1481
1482 snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
1483 ifs->if_xname);
1484 ifr.ifr_mtu = sc->sc_ifp->if_mtu;
1485
1486 error = (*ifs->if_ioctl)(ifs,
1487 SIOCSIFMTU, (caddr_t)&ifr);
1488 if (error != 0) {
1489 log(LOG_NOTICE, "%s: invalid MTU: %u for"
1490 " new member %s\n", sc->sc_ifp->if_xname,
1491 ifr.ifr_mtu,
1492 ifs->if_xname);
1493 return (EXTERROR(EINVAL,
1494 "Failed to set MTU on new member"));
1495 }
1496 }
1497
1498 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1499 if (bif == NULL)
1500 return (ENOMEM);
1501
1502 bif->bif_sc = sc;
1503 bif->bif_ifp = ifs;
1504 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1505 bif->bif_savedcaps = ifs->if_capenable;
1506 bif->bif_vlanproto = ETHERTYPE_VLAN;
1507 bif->bif_pvid = sc->sc_defpvid;
1508 if (sc->sc_flags & IFBRF_DEFQINQ)
1509 bif->bif_flags |= IFBIF_QINQ;
1510
1511 /*
1512 * Assign the interface's MAC address to the bridge if it's the first
1513 * member and the MAC address of the bridge has not been changed from
1514 * the default randomly generated one.
1515 */
1516 if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1517 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1518 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1519 sc->sc_ifaddr = ifs;
1520 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1521 }
1522
1523 ifs->if_bridge = bif;
1524 ifs->if_bridge_output = bridge_output;
1525 ifs->if_bridge_input = bridge_input;
1526 ifs->if_bridge_linkstate = bridge_linkstate;
1527 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1528 /*
1529 * XXX: XLOCK HERE!?!
1530 *
1531 * NOTE: insert_***HEAD*** should be safe for the traversals.
1532 */
1533 CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1534
1535 /* Set interface capabilities to the intersection set of all members */
1536 bridge_mutecaps(sc);
1537 bridge_linkcheck(sc);
1538
1539 /* Place the interface into promiscuous mode */
1540 switch (ifs->if_type) {
1541 case IFT_ETHER:
1542 case IFT_L2VLAN:
1543 error = ifpromisc(ifs, 1);
1544 break;
1545 }
1546
1547 if (error)
1548 bridge_delete_member(sc, bif, 0);
1549 return (error);
1550 }
1551
1552 static int
bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1554 {
1555 struct ifbreq *req = arg;
1556 struct bridge_iflist *bif;
1557
1558 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1559 if (bif == NULL)
1560 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1561
1562 bridge_delete_member(sc, bif, 0);
1563
1564 return (0);
1565 }
1566
1567 static int
bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1569 {
1570 struct ifbreq *req = arg;
1571 struct bridge_iflist *bif;
1572 struct bstp_port *bp;
1573
1574 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1575 if (bif == NULL)
1576 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1577
1578 bp = &bif->bif_stp;
1579 req->ifbr_ifsflags = bif->bif_flags;
1580 req->ifbr_state = bp->bp_state;
1581 req->ifbr_priority = bp->bp_priority;
1582 req->ifbr_path_cost = bp->bp_path_cost;
1583 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1584 req->ifbr_proto = bp->bp_protover;
1585 req->ifbr_role = bp->bp_role;
1586 req->ifbr_stpflags = bp->bp_flags;
1587 req->ifbr_addrcnt = bif->bif_addrcnt;
1588 req->ifbr_addrmax = bif->bif_addrmax;
1589 req->ifbr_addrexceeded = bif->bif_addrexceeded;
1590 req->ifbr_pvid = bif->bif_pvid;
1591 req->ifbr_vlanproto = bif->bif_vlanproto;
1592
1593 /* Copy STP state options as flags */
1594 if (bp->bp_operedge)
1595 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1596 if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1597 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1598 if (bp->bp_ptp_link)
1599 req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1600 if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1601 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1602 if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1603 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1604 if (bp->bp_flags & BSTP_PORT_ADMCOST)
1605 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1606 return (0);
1607 }
1608
1609 static int
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1611 {
1612 struct epoch_tracker et;
1613 struct ifbreq *req = arg;
1614 struct bridge_iflist *bif;
1615 struct bstp_port *bp;
1616 int error;
1617
1618 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1619 if (bif == NULL)
1620 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1621 bp = &bif->bif_stp;
1622
1623 if (req->ifbr_ifsflags & IFBIF_SPAN)
1624 /* SPAN is readonly */
1625 return (EXTERROR(EINVAL, "Span interface cannot be modified"));
1626
1627 NET_EPOCH_ENTER(et);
1628
1629 if (req->ifbr_ifsflags & IFBIF_STP) {
1630 if ((bif->bif_flags & IFBIF_STP) == 0) {
1631 error = bstp_enable(&bif->bif_stp);
1632 if (error) {
1633 NET_EPOCH_EXIT(et);
1634 return (EXTERROR(error,
1635 "Failed to enable STP"));
1636 }
1637 }
1638 } else {
1639 if ((bif->bif_flags & IFBIF_STP) != 0)
1640 bstp_disable(&bif->bif_stp);
1641 }
1642
1643 /* Pass on STP flags */
1644 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1645 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1646 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1647 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1648
1649 /* Save the bits relating to the bridge */
1650 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1651
1652 NET_EPOCH_EXIT(et);
1653
1654 return (0);
1655 }
1656
1657 static int
bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1659 {
1660 struct ifbrparam *param = arg;
1661
1662 sc->sc_brtmax = param->ifbrp_csize;
1663 bridge_rttrim(sc);
1664
1665 return (0);
1666 }
1667
1668 static int
bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1670 {
1671 struct ifbrparam *param = arg;
1672
1673 param->ifbrp_csize = sc->sc_brtmax;
1674
1675 return (0);
1676 }
1677
1678 static int
bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1680 {
1681 struct ifbifconf *bifc = arg;
1682 struct bridge_iflist *bif;
1683 struct ifbreq breq;
1684 char *buf, *outbuf;
1685 int count, buflen, len, error = 0;
1686
1687 count = 0;
1688 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1689 count++;
1690 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1691 count++;
1692
1693 buflen = sizeof(breq) * count;
1694 if (bifc->ifbic_len == 0) {
1695 bifc->ifbic_len = buflen;
1696 return (0);
1697 }
1698 outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1699 if (outbuf == NULL)
1700 return (ENOMEM);
1701
1702 count = 0;
1703 buf = outbuf;
1704 len = min(bifc->ifbic_len, buflen);
1705 bzero(&breq, sizeof(breq));
1706 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1707 if (len < sizeof(breq))
1708 break;
1709
1710 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1711 sizeof(breq.ifbr_ifsname));
1712 /* Fill in the ifbreq structure */
1713 error = bridge_ioctl_gifflags(sc, &breq);
1714 if (error)
1715 break;
1716 memcpy(buf, &breq, sizeof(breq));
1717 count++;
1718 buf += sizeof(breq);
1719 len -= sizeof(breq);
1720 }
1721 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1722 if (len < sizeof(breq))
1723 break;
1724
1725 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1726 sizeof(breq.ifbr_ifsname));
1727 breq.ifbr_ifsflags = bif->bif_flags;
1728 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1729 memcpy(buf, &breq, sizeof(breq));
1730 count++;
1731 buf += sizeof(breq);
1732 len -= sizeof(breq);
1733 }
1734
1735 bifc->ifbic_len = sizeof(breq) * count;
1736 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1737 free(outbuf, M_TEMP);
1738 return (error);
1739 }
1740
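/*
 * bridge_ioctl_rts exports the learned address table using the same
 * two-pass sizing convention as bridge_ioctl_gifs() above; note that
 * ifba_expire is reported as the number of seconds remaining rather than
 * as an absolute time.
 */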
1741 static int
1742 bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1743 {
1744 struct ifbaconf *bac = arg;
1745 struct bridge_rtnode *brt;
1746 struct ifbareq bareq;
1747 char *buf, *outbuf;
1748 int count, buflen, len, error = 0;
1749
1750 if (bac->ifbac_len == 0)
1751 return (0);
1752
1753 count = 0;
1754 CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1755 count++;
1756 buflen = sizeof(bareq) * count;
1757
1758 outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1759 if (outbuf == NULL)
1760 return (ENOMEM);
1761
1762 count = 0;
1763 buf = outbuf;
1764 len = min(bac->ifbac_len, buflen);
1765 bzero(&bareq, sizeof(bareq));
1766 CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1767 if (len < sizeof(bareq))
1768 goto out;
1769 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1770 sizeof(bareq.ifba_ifsname));
1771 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1772 bareq.ifba_vlan = brt->brt_vlan;
1773 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1774 time_uptime < brt->brt_expire)
1775 bareq.ifba_expire = brt->brt_expire - time_uptime;
1776 else
1777 bareq.ifba_expire = 0;
1778 bareq.ifba_flags = brt->brt_flags;
1779
1780 memcpy(buf, &bareq, sizeof(bareq));
1781 count++;
1782 buf += sizeof(bareq);
1783 len -= sizeof(bareq);
1784 }
1785 out:
1786 bac->ifbac_len = sizeof(bareq) * count;
1787 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1788 free(outbuf, M_TEMP);
1789 return (error);
1790 }
1791
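/*
 * bridge_ioctl_saddr installs an address-table entry by hand (for static
 * or sticky entries); passing 1 as the "setflags" argument makes
 * bridge_rtupdate() overwrite the entry's flags with the caller-supplied
 * ifba_flags.
 */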
1792 static int
1793 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1794 {
1795 struct ifbareq *req = arg;
1796 struct bridge_iflist *bif;
1797 struct epoch_tracker et;
1798 int error;
1799
1800 NET_EPOCH_ENTER(et);
1801 bif = bridge_lookup_member(sc, req->ifba_ifsname);
1802 if (bif == NULL) {
1803 NET_EPOCH_EXIT(et);
1804 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1805 }
1806
1807 /* bridge_rtupdate() may acquire the lock. */
1808 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1809 req->ifba_flags);
1810 NET_EPOCH_EXIT(et);
1811
1812 return (error);
1813 }
1814
1815 static int
1816 bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1817 {
1818 struct ifbrparam *param = arg;
1819
1820 sc->sc_brttimeout = param->ifbrp_ctime;
1821 return (0);
1822 }
1823
1824 static int
1825 bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1826 {
1827 struct ifbrparam *param = arg;
1828
1829 param->ifbrp_ctime = sc->sc_brttimeout;
1830 return (0);
1831 }
1832
1833 static int
1834 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1835 {
1836 struct ifbareq *req = arg;
1837 int vlan = req->ifba_vlan;
1838
1839 /* Userspace uses '0' to mean 'any vlan' */
1840 if (vlan == 0)
1841 vlan = DOT1Q_VID_RSVD_IMPL;
1842
1843 return (bridge_rtdaddr(sc, req->ifba_dst, vlan));
1844 }
1845
1846 static int
1847 bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1848 {
1849 struct ifbreq *req = arg;
1850
1851 BRIDGE_RT_LOCK(sc);
1852 bridge_rtflush(sc, req->ifbr_ifsflags);
1853 BRIDGE_RT_UNLOCK(sc);
1854
1855 return (0);
1856 }
1857
1858 static int
1859 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1860 {
1861 struct ifbrparam *param = arg;
1862 struct bstp_state *bs = &sc->sc_stp;
1863
1864 param->ifbrp_prio = bs->bs_bridge_priority;
1865 return (0);
1866 }
1867
1868 static int
1869 bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1870 {
1871 struct ifbrparam *param = arg;
1872
1873 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1874 }
1875
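/*
 * 802.1D encodes its timer values in units of 1/256 of a second and
 * bstp(4) stores them in that form, so the "get" handlers below shift
 * right by 8 to report whole seconds to userland, while the matching
 * "set" handlers pass the plain seconds value on to bstp_set_*().
 */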
1876 static int
1877 bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1878 {
1879 struct ifbrparam *param = arg;
1880 struct bstp_state *bs = &sc->sc_stp;
1881
1882 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1883 return (0);
1884 }
1885
1886 static int
1887 bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1888 {
1889 struct ifbrparam *param = arg;
1890
1891 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1892 }
1893
1894 static int
1895 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1896 {
1897 struct ifbrparam *param = arg;
1898 struct bstp_state *bs = &sc->sc_stp;
1899
1900 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1901 return (0);
1902 }
1903
1904 static int
1905 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1906 {
1907 struct ifbrparam *param = arg;
1908
1909 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1910 }
1911
1912 static int
1913 bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1914 {
1915 struct ifbrparam *param = arg;
1916 struct bstp_state *bs = &sc->sc_stp;
1917
1918 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1919 return (0);
1920 }
1921
1922 static int
1923 bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1924 {
1925 struct ifbrparam *param = arg;
1926
1927 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1928 }
1929
1930 static int
1931 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1932 {
1933 struct ifbreq *req = arg;
1934 struct bridge_iflist *bif;
1935
1936 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1937 if (bif == NULL)
1938 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1939
1940 return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1941 }
1942
1943 static int
1944 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1945 {
1946 struct ifbreq *req = arg;
1947 struct bridge_iflist *bif;
1948
1949 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1950 if (bif == NULL)
1951 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1952
1953 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1954 }
1955
1956 static int
1957 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1958 {
1959 struct ifbreq *req = arg;
1960 struct bridge_iflist *bif;
1961
1962 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1963 if (bif == NULL)
1964 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1965
1966 bif->bif_addrmax = req->ifbr_addrmax;
1967 return (0);
1968 }
1969
1970 static int
1971 bridge_ioctl_sifpvid(struct bridge_softc *sc, void *arg)
1972 {
1973 struct ifbreq *req = arg;
1974 struct bridge_iflist *bif;
1975
1976 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1977 if (bif == NULL)
1978 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1979
1980 if (req->ifbr_pvid > DOT1Q_VID_MAX)
1981 return (EXTERROR(EINVAL, "Invalid VLAN ID"));
1982
1983 bif->bif_pvid = req->ifbr_pvid;
1984 return (0);
1985 }
1986
1987 static int
1988 bridge_ioctl_sifvlanset(struct bridge_softc *sc, void *arg)
1989 {
1990 struct ifbif_vlan_req *req = arg;
1991 struct bridge_iflist *bif;
1992
1993 bif = bridge_lookup_member(sc, req->bv_ifname);
1994 if (bif == NULL)
1995 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1996
1997 /* Reject invalid VIDs. */
1998 if (BRVLAN_TEST(&req->bv_set, DOT1Q_VID_NULL) ||
1999 BRVLAN_TEST(&req->bv_set, DOT1Q_VID_RSVD_IMPL))
2000 return (EXTERROR(EINVAL, "Invalid VLAN ID in set"));
2001
2002 switch (req->bv_op) {
2003 /* Replace the existing vlan set with the new set */
2004 case BRDG_VLAN_OP_SET:
2005 BIT_COPY(BRVLAN_SETSIZE, &req->bv_set, &bif->bif_vlan_set);
2006 break;
2007
2008 /* Modify the existing vlan set to add the given vlans */
2009 case BRDG_VLAN_OP_ADD:
2010 BIT_OR(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2011 break;
2012
2013 /* Modify the existing vlan set to remove the given vlans */
2014 case BRDG_VLAN_OP_DEL:
2015 BIT_ANDNOT(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2016 break;
2017
2018 /* Invalid or unknown operation */
2019 default:
2020 return (EXTERROR(EINVAL,
2021 "Unsupported BRDGSIFVLANSET operation"));
2022 }
2023
2024 return (0);
2025 }
2026
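/*
 * BRDGSIFVLANSET (below) edits a member port's set of allowed tagged
 * VLANs, which bridge_vfilter_in()/bridge_vfilter_out() consult when VLAN
 * filtering is enabled on the bridge.  A rough userland sketch, assuming
 * the sys/bitset.h macros are usable by the caller as they are here:
 *
 *	struct ifbif_vlan_req req = { .bv_op = BRDG_VLAN_OP_ADD };
 *	strlcpy(req.bv_ifname, "em0", sizeof(req.bv_ifname));
 *	BIT_SET(BRVLAN_SETSIZE, 20, &req.bv_set);
 *	(issue through SIOCSDRVSPEC with ifd_cmd = BRDGSIFVLANSET,
 *	 as sketched for BRDGGIFS above)
 */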
2027 static int
2028 bridge_ioctl_gifvlanset(struct bridge_softc *sc, void *arg)
2029 {
2030 struct ifbif_vlan_req *req = arg;
2031 struct bridge_iflist *bif;
2032
2033 bif = bridge_lookup_member(sc, req->bv_ifname);
2034 if (bif == NULL)
2035 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
2036
2037 BIT_COPY(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2038 return (0);
2039 }
2040
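/*
 * Span ports are kept on sc_spanlist, separate from ordinary members:
 * they never learn addresses and take no part in forwarding decisions,
 * they simply receive a copy of every frame the bridge handles (see
 * bridge_span() below).
 */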
2041 static int
2042 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
2043 {
2044 struct ifbreq *req = arg;
2045 struct bridge_iflist *bif = NULL;
2046 struct ifnet *ifs;
2047
2048 ifs = ifunit(req->ifbr_ifsname);
2049 if (ifs == NULL)
2050 return (EXTERROR(ENOENT, "No such interface"));
2051
2052 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2053 if (ifs == bif->bif_ifp)
2054 return (EXTERROR(EBUSY,
2055 "Interface is already a span port"));
2056
2057 if (ifs->if_bridge != NULL)
2058 return (EXTERROR(EEXIST,
2059 "Interface is already a bridge member"));
2060
2061 switch (ifs->if_type) {
2062 case IFT_ETHER:
2063 case IFT_GIF:
2064 case IFT_L2VLAN:
2065 break;
2066 default:
2067 return (EXTERROR(EINVAL, "Unsupported interface type"));
2068 }
2069
2070 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
2071 if (bif == NULL)
2072 return (ENOMEM);
2073
2074 bif->bif_ifp = ifs;
2075 bif->bif_flags = IFBIF_SPAN;
2076
2077 CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
2078
2079 return (0);
2080 }
2081
2082 static int
2083 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
2084 {
2085 struct ifbreq *req = arg;
2086 struct bridge_iflist *bif;
2087 struct ifnet *ifs;
2088
2089 ifs = ifunit(req->ifbr_ifsname);
2090 if (ifs == NULL)
2091 return (EXTERROR(ENOENT, "No such interface"));
2092
2093 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2094 if (ifs == bif->bif_ifp)
2095 break;
2096
2097 if (bif == NULL)
2098 return (EXTERROR(ENOENT, "Interface is not a span port"));
2099
2100 bridge_delete_span(sc, bif);
2101
2102 return (0);
2103 }
2104
2105 static int
2106 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
2107 {
2108 struct ifbropreq *req = arg;
2109 struct bstp_state *bs = &sc->sc_stp;
2110 struct bstp_port *root_port;
2111
2112 req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
2113 req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
2114 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
2115
2116 root_port = bs->bs_root_port;
2117 if (root_port == NULL)
2118 req->ifbop_root_port = 0;
2119 else
2120 req->ifbop_root_port = root_port->bp_ifp->if_index;
2121
2122 req->ifbop_holdcount = bs->bs_txholdcount;
2123 req->ifbop_priority = bs->bs_bridge_priority;
2124 req->ifbop_protocol = bs->bs_protover;
2125 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
2126 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
2127 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
2128 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
2129 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
2130 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
2131
2132 return (0);
2133 }
2134
2135 static int
2136 bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
2137 {
2138 struct ifbrparam *param = arg;
2139
2140 param->ifbrp_cexceeded = sc->sc_brtexceeded;
2141 return (0);
2142 }
2143
2144 static int
2145 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
2146 {
2147 struct ifbpstpconf *bifstp = arg;
2148 struct bridge_iflist *bif;
2149 struct bstp_port *bp;
2150 struct ifbpstpreq bpreq;
2151 char *buf, *outbuf;
2152 int count, buflen, len, error = 0;
2153
2154 count = 0;
2155 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2156 if ((bif->bif_flags & IFBIF_STP) != 0)
2157 count++;
2158 }
2159
2160 buflen = sizeof(bpreq) * count;
2161 if (bifstp->ifbpstp_len == 0) {
2162 bifstp->ifbpstp_len = buflen;
2163 return (0);
2164 }
2165
2166 outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
2167 if (outbuf == NULL)
2168 return (ENOMEM);
2169
2170 count = 0;
2171 buf = outbuf;
2172 len = min(bifstp->ifbpstp_len, buflen);
2173 bzero(&bpreq, sizeof(bpreq));
2174 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2175 if (len < sizeof(bpreq))
2176 break;
2177
2178 if ((bif->bif_flags & IFBIF_STP) == 0)
2179 continue;
2180
2181 bp = &bif->bif_stp;
2182 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
2183 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
2184 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
2185 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
2186 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
2187 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
2188
2189 memcpy(buf, &bpreq, sizeof(bpreq));
2190 count++;
2191 buf += sizeof(bpreq);
2192 len -= sizeof(bpreq);
2193 }
2194
2195 bifstp->ifbpstp_len = sizeof(bpreq) * count;
2196 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
2197 free(outbuf, M_TEMP);
2198 return (error);
2199 }
2200
2201 static int
2202 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
2203 {
2204 struct ifbrparam *param = arg;
2205
2206 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
2207 }
2208
2209 static int
2210 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
2211 {
2212 struct ifbrparam *param = arg;
2213
2214 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
2215 }
2216
2217 static int
2218 bridge_ioctl_gflags(struct bridge_softc *sc, void *arg)
2219 {
2220 struct ifbrparam *param = arg;
2221
2222 param->ifbrp_flags = sc->sc_flags;
2223
2224 return (0);
2225 }
2226
2227 static int
2228 bridge_ioctl_sflags(struct bridge_softc *sc, void *arg)
2229 {
2230 struct ifbrparam *param = arg;
2231
2232 sc->sc_flags = param->ifbrp_flags;
2233
2234 return (0);
2235 }
2236
2237 static int
2238 bridge_ioctl_gdefpvid(struct bridge_softc *sc, void *arg)
2239 {
2240 struct ifbrparam *param = arg;
2241
2242 param->ifbrp_defpvid = sc->sc_defpvid;
2243
2244 return (0);
2245 }
2246
2247 static int
2248 bridge_ioctl_sdefpvid(struct bridge_softc *sc, void *arg)
2249 {
2250 struct ifbrparam *param = arg;
2251
2252 /* Reject invalid VIDs, but allow 0 to mean 'none'. */
2253 if (param->ifbrp_defpvid > DOT1Q_VID_MAX)
2254 return (EINVAL);
2255
2256 sc->sc_defpvid = param->ifbrp_defpvid;
2257
2258 return (0);
2259 }
2260
2261 static int
2262 bridge_ioctl_svlanproto(struct bridge_softc *sc, void *arg)
2263 {
2264 struct ifbreq *req = arg;
2265 struct bridge_iflist *bif;
2266
2267 bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2268 if (bif == NULL)
2269 return (EXTERROR(ENOENT, "Interface is not a bridge member"));
2270
2271 if (req->ifbr_vlanproto != ETHERTYPE_VLAN &&
2272 req->ifbr_vlanproto != ETHERTYPE_QINQ)
2273 return (EXTERROR(EINVAL, "Invalid VLAN protocol"));
2274
2275 bif->bif_vlanproto = req->ifbr_vlanproto;
2276
2277 return (0);
2278 }
2279 /*
2280 * bridge_ifdetach:
2281 *
2282 * Detach an interface from a bridge. Called when a member
2283 * interface is detaching.
2284 */
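/*
 * (bridge_ifdetach is believed to be registered as an ifnet departure
 * eventhandler elsewhere in this file, so it runs for every detaching
 * interface, member or not; hence the membership checks below.)
 */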
2285 static void
2286 bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
2287 {
2288 struct bridge_iflist *bif = ifp->if_bridge;
2289 struct bridge_softc *sc = NULL;
2290
2291 if (bif)
2292 sc = bif->bif_sc;
2293
2294 if (ifp->if_flags & IFF_RENAMING)
2295 return;
2296 if (V_bridge_cloner == NULL) {
2297 /*
2298 * This detach handler can be called after
2299 * vnet_bridge_uninit(). Just return in that case.
2300 */
2301 return;
2302 }
2303 /* Check if the interface is a bridge member */
2304 if (sc != NULL) {
2305 BRIDGE_LOCK(sc);
2306 bridge_delete_member(sc, bif, 1);
2307 BRIDGE_UNLOCK(sc);
2308 return;
2309 }
2310
2311 /* Check if the interface is a span port */
2312 BRIDGE_LIST_LOCK();
2313 LIST_FOREACH(sc, &V_bridge_list, sc_list) {
2314 BRIDGE_LOCK(sc);
2315 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2316 if (ifp == bif->bif_ifp) {
2317 bridge_delete_span(sc, bif);
2318 break;
2319 }
2320
2321 BRIDGE_UNLOCK(sc);
2322 }
2323 BRIDGE_LIST_UNLOCK();
2324 }
2325
2326 /*
2327 * bridge_init:
2328 *
2329 * Initialize a bridge interface.
2330 */
2331 static void
2332 bridge_init(void *xsc)
2333 {
2334 struct bridge_softc *sc = (struct bridge_softc *)xsc;
2335 struct ifnet *ifp = sc->sc_ifp;
2336
2337 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2338 return;
2339
2340 BRIDGE_LOCK(sc);
2341 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
2342 bridge_timer, sc);
2343
2344 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2345 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
2346
2347 BRIDGE_UNLOCK(sc);
2348 }
2349
2350 /*
2351 * bridge_stop:
2352 *
2353 * Stop the bridge interface.
2354 */
2355 static void
2356 bridge_stop(struct ifnet *ifp, int disable)
2357 {
2358 struct bridge_softc *sc = ifp->if_softc;
2359
2360 BRIDGE_LOCK_ASSERT(sc);
2361
2362 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2363 return;
2364
2365 BRIDGE_RT_LOCK(sc);
2366 callout_stop(&sc->sc_brcallout);
2367
2368 bstp_stop(&sc->sc_stp);
2369
2370 bridge_rtflush(sc, IFBF_FLUSHDYN);
2371 BRIDGE_RT_UNLOCK(sc);
2372
2373 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2374 }
2375
2376 /*
2377 * bridge_enqueue:
2378 *
2379 * Enqueue a packet on a bridge member interface.
2380 *
2381 */
2382 static int
2383 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
2384 struct bridge_iflist *bif)
2385 {
2386 int len, err = 0;
2387 short mflags;
2388 struct mbuf *m0;
2389
2390 /*
2391 * Find the bridge member port this packet is being sent on, if the
2392 * caller didn't already provide it.
2393 */
2394 if (bif == NULL)
2395 bif = bridge_lookup_member_if(sc, dst_ifp);
2396 if (bif == NULL) {
2397 /* Perhaps the interface was removed from the bridge */
2398 m_freem(m);
2399 return (EINVAL);
2400 }
2401
2402 /* Do VLAN filtering. */
2403 if (!bridge_vfilter_out(bif, m)) {
2404 m_freem(m);
2405 return (0);
2406 }
2407
2408 /* We may be sending a fragment so traverse the mbuf */
2409 for (; m; m = m0) {
2410 m0 = m->m_nextpkt;
2411 m->m_nextpkt = NULL;
2412 len = m->m_pkthdr.len;
2413 mflags = m->m_flags;
2414
2415 /*
2416 * If the native VLAN ID of the outgoing interface matches the
2417 * VLAN ID of the frame, remove the VLAN tag.
2418 */
2419 if (bif->bif_pvid != DOT1Q_VID_NULL &&
2420 VLANTAGOF(m) == bif->bif_pvid) {
2421 m->m_flags &= ~M_VLANTAG;
2422 m->m_pkthdr.ether_vtag = 0;
2423 }
2424
2425 /*
2426 * There are two cases where we have to insert our own tag:
2427 * if the member interface doesn't support hardware tagging,
2428 * or if the tag proto is not 802.1q.
2429 */
2430 if ((m->m_flags & M_VLANTAG) &&
2431 ((dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0 ||
2432 bif->bif_vlanproto != ETHERTYPE_VLAN)) {
2433 m = ether_vlanencap_proto(m, m->m_pkthdr.ether_vtag,
2434 bif->bif_vlanproto);
2435 if (m == NULL) {
2436 if_printf(dst_ifp,
2437 "unable to prepend VLAN header\n");
2438 if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
2439 continue;
2440 }
2441 m->m_flags &= ~M_VLANTAG;
2442 }
2443
2444 M_ASSERTPKTHDR(m); /* We shouldn't transmit an mbuf without a pkthdr */
2445 /*
2446 * XXXZL: gif(4) requires the af to be saved in csum_data field
2447 * so that gif_transmit() routine can pull it back.
2448 */
2449 if (dst_ifp->if_type == IFT_GIF)
2450 m->m_pkthdr.csum_data = AF_LINK;
2451 if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
2452 int n;
2453
2454 for (m = m0, n = 1; m != NULL; m = m0, n++) {
2455 m0 = m->m_nextpkt;
2456 m_freem(m);
2457 }
2458 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, n);
2459 break;
2460 }
2461
2462 if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
2463 if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
2464 if (mflags & M_MCAST)
2465 if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
2466 }
2467
2468 return (err);
2469 }
2470
2471 /*
2472 * bridge_dummynet:
2473 *
2474 * Receive a queued packet from dummynet and pass it on to the output
2475 * interface.
2476 *
2477 * The mbuf has the Ethernet header already attached.
2478 */
2479 static void
2480 bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2481 {
2482 struct bridge_iflist *bif = ifp->if_bridge;
2483 struct bridge_softc *sc = NULL;
2484
2485 if (bif)
2486 sc = bif->bif_sc;
2487
2488 /*
2489 * The packet didn't originate from a member interface. This should only
2490 * ever happen if a member interface is removed while packets are
2491 * queued for it.
2492 */
2493 if (sc == NULL) {
2494 m_freem(m);
2495 return;
2496 }
2497
2498 if (PFIL_HOOKED_OUT_46) {
2499 if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2500 return;
2501 if (m == NULL)
2502 return;
2503 }
2504
2505 bridge_enqueue(sc, ifp, m, NULL);
2506 }
2507
2508 /*
2509 * bridge_output:
2510 *
2511 * Send output from a bridge member interface. This
2512 * performs the bridging function for locally originated
2513 * packets.
2514 *
2515 * The mbuf has the Ethernet header already attached. We must
2516 * enqueue or free the mbuf before returning.
2517 */
2518 static int
2519 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2520 struct rtentry *rt)
2521 {
2522 struct ether_header *eh;
2523 struct bridge_iflist *sbif;
2524 struct ifnet *bifp, *dst_if;
2525 struct bridge_softc *sc;
2526 ether_vlanid_t vlan;
2527
2528 NET_EPOCH_ASSERT();
2529
2530 if (m->m_len < ETHER_HDR_LEN) {
2531 m = m_pullup(m, ETHER_HDR_LEN);
2532 if (m == NULL)
2533 return (0);
2534 }
2535
2536 sbif = ifp->if_bridge;
2537 sc = sbif->bif_sc;
2538 bifp = sc->sc_ifp;
2539
2540 eh = mtod(m, struct ether_header *);
2541 vlan = VLANTAGOF(m);
2542
2543 /*
2544 * If bridge is down, but the original output interface is up,
2545 * go ahead and send out that interface. Otherwise, the packet
2546 * is dropped below.
2547 */
2548 if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2549 dst_if = ifp;
2550 goto sendunicast;
2551 }
2552
2553 /*
2554 * If the packet is a multicast, or we don't know a better way to
2555 * get there, send to all interfaces.
2556 */
2557 if (ETHER_IS_MULTICAST(eh->ether_dhost))
2558 dst_if = NULL;
2559 else
2560 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2561 /* Tap any traffic not passing back out the originating interface */
2562 if (dst_if != ifp)
2563 ETHER_BPF_MTAP(bifp, m);
2564 if (dst_if == NULL) {
2565 struct bridge_iflist *bif;
2566 struct mbuf *mc;
2567 int used = 0;
2568
2569 bridge_span(sc, m);
2570
2571 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2572 dst_if = bif->bif_ifp;
2573
2574 if (dst_if->if_type == IFT_GIF)
2575 continue;
2576 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2577 continue;
2578
2579 /*
2580 * If this is not the original output interface,
2581 * and the interface is participating in spanning
2582 * tree, make sure the port is in a state that
2583 * allows forwarding.
2584 */
2585 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2586 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2587 continue;
2588
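			/*
			 * For the last member on the list, send the original
			 * mbuf itself rather than yet another copy.
			 */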
2589 if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2590 used = 1;
2591 mc = m;
2592 } else {
2593 mc = m_dup(m, M_NOWAIT);
2594 if (mc == NULL) {
2595 if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2596 continue;
2597 }
2598 }
2599
2600 bridge_enqueue(sc, dst_if, mc, bif);
2601 }
2602 if (used == 0)
2603 m_freem(m);
2604 return (0);
2605 }
2606
2607 sendunicast:
2608 /*
2609 * XXX Spanning tree consideration here?
2610 */
2611
2612 bridge_span(sc, m);
2613 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2614 m_freem(m);
2615 return (0);
2616 }
2617
2618 bridge_enqueue(sc, dst_if, m, NULL);
2619 return (0);
2620 }
2621
2622 /*
2623 * bridge_transmit:
2624 *
2625 * Do output on a bridge.
2626 *
2627 */
2628 static int
2629 bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2630 {
2631 struct bridge_softc *sc;
2632 struct ether_header *eh;
2633 struct ifnet *dst_if;
2634 int error = 0;
2635 ether_vlanid_t vlan;
2636
2637 sc = ifp->if_softc;
2638
2639 ETHER_BPF_MTAP(ifp, m);
2640
2641 eh = mtod(m, struct ether_header *);
2642 vlan = VLANTAGOF(m);
2643
2644 if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2645 (dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan)) != NULL) {
2646 error = bridge_enqueue(sc, dst_if, m, NULL);
2647 } else
2648 bridge_broadcast(sc, ifp, m, 0);
2649
2650 return (error);
2651 }
2652
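/*
 * When the kernel is built with ALTQ, the bridge also exposes the classic
 * if_start/if_snd queueing entry points so that ALTQ packet scheduling
 * disciplines can be attached to the bridge interface:
 * bridge_altq_transmit() enqueues when ALTQ is enabled on the queue and
 * otherwise falls straight through to bridge_transmit().
 */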
2653 #ifdef ALTQ
2654 static void
2655 bridge_altq_start(if_t ifp)
2656 {
2657 struct ifaltq *ifq = &ifp->if_snd;
2658 struct mbuf *m;
2659
2660 IFQ_LOCK(ifq);
2661 IFQ_DEQUEUE_NOLOCK(ifq, m);
2662 while (m != NULL) {
2663 bridge_transmit(ifp, m);
2664 IFQ_DEQUEUE_NOLOCK(ifq, m);
2665 }
2666 IFQ_UNLOCK(ifq);
2667 }
2668
2669 static int
2670 bridge_altq_transmit(if_t ifp, struct mbuf *m)
2671 {
2672 int err;
2673
2674 if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
2675 IFQ_ENQUEUE(&ifp->if_snd, m, err);
2676 if (err == 0)
2677 bridge_altq_start(ifp);
2678 } else
2679 err = bridge_transmit(ifp, m);
2680
2681 return (err);
2682 }
2683 #endif /* ALTQ */
2684
2685 /*
2686 * The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2687 */
2688 static void
2689 bridge_qflush(struct ifnet *ifp __unused)
2690 {
2691 }
2692
2693 /*
2694 * bridge_forward:
2695 *
2696 * The forwarding function of the bridge.
2697 *
2698 * NOTE: Releases the lock on return.
2699 */
2700 static void
2701 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2702 struct mbuf *m)
2703 {
2704 struct bridge_iflist *dbif;
2705 struct ifnet *src_if, *dst_if, *ifp;
2706 struct ether_header *eh;
2707 uint8_t *dst;
2708 int error;
2709 ether_vlanid_t vlan;
2710
2711 NET_EPOCH_ASSERT();
2712
2713 src_if = m->m_pkthdr.rcvif;
2714 ifp = sc->sc_ifp;
2715 vlan = VLANTAGOF(m);
2716
2717 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2718 if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2719
2720 if ((sbif->bif_flags & IFBIF_STP) &&
2721 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2722 goto drop;
2723
2724 eh = mtod(m, struct ether_header *);
2725 dst = eh->ether_dhost;
2726
2727 /* If the interface is learning, record the address. */
2728 if (sbif->bif_flags & IFBIF_LEARNING) {
2729 error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2730 sbif, 0, IFBAF_DYNAMIC);
2731 /*
2732 * If the interface has addresses limits then deny any source
2733 * that is not in the cache.
2734 */
2735 if (error && sbif->bif_addrmax)
2736 goto drop;
2737 }
2738
2739 if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2740 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2741 goto drop;
2742
2743 #ifdef DEV_NETMAP
2744 /*
2745 * Hand the packet to netmap only if it wasn't injected by netmap
2746 * itself.
2747 */
2748 if ((m->m_flags & M_BRIDGE_INJECT) == 0 &&
2749 (if_getcapenable(ifp) & IFCAP_NETMAP) != 0) {
2750 ifp->if_input(ifp, m);
2751 return;
2752 }
2753 m->m_flags &= ~M_BRIDGE_INJECT;
2754 #endif
2755
2756 /*
2757 * At this point, the port either doesn't participate
2758 * in spanning tree or it is in the forwarding state.
2759 */
2760
2761 /*
2762 * If the packet is unicast, destined for someone on
2763 * "this" side of the bridge, drop it.
2764 */
2765 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2766 dst_if = bridge_rtlookup(sc, dst, vlan);
2767 if (src_if == dst_if)
2768 goto drop;
2769 } else {
2770 /*
2771 * Check if it's a reserved multicast address; any address
2772 * listed in 802.1D section 7.12.6 may not be forwarded by the
2773 * bridge.
2774 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
2775 */
2776 if (dst[0] == 0x01 && dst[1] == 0x80 &&
2777 dst[2] == 0xc2 && dst[3] == 0x00 &&
2778 dst[4] == 0x00 && dst[5] <= 0x0f)
2779 goto drop;
2780
2781 /* ...forward it to all interfaces. */
2782 if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2783 dst_if = NULL;
2784 }
2785
2786 /*
2787 * If we have a destination interface which is a member of our bridge,
2788 * OR this is a unicast packet, push it through the bpf(4) machinery.
2789 * For broadcast or multicast packets, don't bother because it will
2790 * be reinjected into ether_input. We do this before we pass the packets
2791 * through the pfil(9) framework, as it is possible that pfil(9) will
2792 * drop the packet, or possibly modify it, making it difficult to debug
2793 * firewall issues on the bridge.
2794 */
2795 if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2796 ETHER_BPF_MTAP(ifp, m);
2797
2798 /* run the packet filter */
2799 if (PFIL_HOOKED_IN_46) {
2800 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2801 return;
2802 if (m == NULL)
2803 return;
2804 }
2805
2806 if (dst_if == NULL) {
2807 bridge_broadcast(sc, src_if, m, 1);
2808 return;
2809 }
2810
2811 /*
2812 * At this point, we're dealing with a unicast frame
2813 * going to a different interface.
2814 */
2815 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2816 goto drop;
2817
2818 dbif = bridge_lookup_member_if(sc, dst_if);
2819 if (dbif == NULL)
2820 /* Not a member of the bridge (anymore?) */
2821 goto drop;
2822
2823 /* Private segments can not talk to each other */
2824 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2825 goto drop;
2826
2827 if ((dbif->bif_flags & IFBIF_STP) &&
2828 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2829 goto drop;
2830
2831 if (PFIL_HOOKED_OUT_46) {
2832 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2833 return;
2834 if (m == NULL)
2835 return;
2836 }
2837
2838 bridge_enqueue(sc, dst_if, m, dbif);
2839 return;
2840
2841 drop:
2842 m_freem(m);
2843 }
2844
2845 /*
2846 * bridge_input:
2847 *
2848 * Receive input from a member interface. Queue the packet for
2849 * bridging if it is not for us.
2850 */
2851 static struct mbuf *
2852 bridge_input(struct ifnet *ifp, struct mbuf *m)
2853 {
2854 struct bridge_softc *sc = NULL;
2855 struct bridge_iflist *bif, *bif2;
2856 struct ifnet *bifp;
2857 struct ether_header *eh;
2858 struct mbuf *mc, *mc2;
2859 ether_vlanid_t vlan;
2860 int error;
2861
2862 NET_EPOCH_ASSERT();
2863
2864 /* We need the Ethernet header later, so make sure we have it now. */
2865 if (m->m_len < ETHER_HDR_LEN) {
2866 m = m_pullup(m, ETHER_HDR_LEN);
2867 		if (m == NULL) {
2868 			/* There is no bridge softc to charge an error */
2869 			/* against yet (sc is only set below), so just bail. */
2870 			return (NULL);
2871 		}
2872 }
2873
2874 eh = mtod(m, struct ether_header *);
2875 vlan = VLANTAGOF(m);
2876
2877 /*
2878 * If this frame has a VLAN tag and the receiving interface has a
2879 * vlan(4) trunk, then it is destined for vlan(4), not for us.
2880 * This means if vlan(4) and bridge(4) are configured on the same
2881 * interface, vlan(4) is preferred, which is what users typically
2882 * expect.
2883 */
2884 if (vlan != DOT1Q_VID_NULL && ifp->if_vlantrunk != NULL)
2885 return (m);
2886
2887 bif = ifp->if_bridge;
2888 if (bif)
2889 sc = bif->bif_sc;
2890
2891 if (sc == NULL) {
2892 /*
2893 * This packet originated from the bridge itself, so it must
2894 * have been transmitted by netmap. Derive the "source"
2895 * interface from the source address and drop the packet if the
2896 * source address isn't known.
2897 */
2898 KASSERT((m->m_flags & M_BRIDGE_INJECT) != 0,
2899 ("%s: ifnet %p missing a bridge softc", __func__, ifp));
2900 sc = if_getsoftc(ifp);
2901 ifp = bridge_rtlookup(sc, eh->ether_shost, vlan);
2902 if (ifp == NULL) {
2903 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2904 m_freem(m);
2905 return (NULL);
2906 }
2907 m->m_pkthdr.rcvif = ifp;
2908 }
2909 bifp = sc->sc_ifp;
2910 if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2911 return (m);
2912
2913 /*
2914 * Implement support for bridge monitoring. If this flag has been
2915 * set on this interface, discard the packet once we push it through
2916 * the bpf(4) machinery, but before we do, increment the byte and
2917 * packet counters associated with this interface.
2918 */
2919 if ((bifp->if_flags & IFF_MONITOR) != 0) {
2920 m->m_pkthdr.rcvif = bifp;
2921 ETHER_BPF_MTAP(bifp, m);
2922 if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2923 if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2924 m_freem(m);
2925 return (NULL);
2926 }
2927
2928 /* Do VLAN filtering. */
2929 if (!bridge_vfilter_in(bif, m)) {
2930 if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2931 m_freem(m);
2932 return (NULL);
2933 }
2934 /* bridge_vfilter_in() may add a tag */
2935 vlan = VLANTAGOF(m);
2936
2937 bridge_span(sc, m);
2938
2939 if (m->m_flags & (M_BCAST|M_MCAST)) {
2940 /* Tap off 802.1D packets; they do not get forwarded. */
2941 if (memcmp(eh->ether_dhost, bstp_etheraddr,
2942 ETHER_ADDR_LEN) == 0) {
2943 bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2944 return (NULL);
2945 }
2946
2947 if ((bif->bif_flags & IFBIF_STP) &&
2948 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2949 return (m);
2950 }
2951
2952 /*
2953 * Make a deep copy of the packet and enqueue the copy
2954 * for bridge processing; return the original packet for
2955 * local processing.
2956 */
2957 mc = m_dup(m, M_NOWAIT);
2958 if (mc == NULL) {
2959 return (m);
2960 }
2961
2962 /* Perform the bridge forwarding function with the copy. */
2963 bridge_forward(sc, bif, mc);
2964
2965 #ifdef DEV_NETMAP
2966 /*
2967 * If netmap is enabled and has not already seen this packet,
2968 * then it will be consumed by bridge_forward().
2969 */
2970 if ((if_getcapenable(bifp) & IFCAP_NETMAP) != 0 &&
2971 (m->m_flags & M_BRIDGE_INJECT) == 0) {
2972 m_freem(m);
2973 return (NULL);
2974 }
2975 #endif
2976
2977 /*
2978 * Reinject the mbuf as arriving on the bridge so we have a
2979 * chance at claiming multicast packets. We can not loop back
2980 * here from ether_input as a bridge is never a member of a
2981 * bridge.
2982 */
2983 KASSERT(bifp->if_bridge == NULL,
2984 ("loop created in bridge_input"));
2985 mc2 = m_dup(m, M_NOWAIT);
2986 if (mc2 != NULL) {
2987 /* Keep the layer3 header aligned */
2988 int i = min(mc2->m_pkthdr.len, max_protohdr);
2989 mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2990 }
2991 if (mc2 != NULL) {
2992 mc2->m_pkthdr.rcvif = bifp;
2993 mc2->m_flags &= ~M_BRIDGE_INJECT;
2994 sc->sc_if_input(bifp, mc2);
2995 }
2996
2997 /* Return the original packet for local processing. */
2998 return (m);
2999 }
3000
3001 if ((bif->bif_flags & IFBIF_STP) &&
3002 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
3003 return (m);
3004 }
3005
3006 #if defined(INET) || defined(INET6)
3007 #define CARP_CHECK_WE_ARE_DST(iface) \
3008 ((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_dhost))
3009 #define CARP_CHECK_WE_ARE_SRC(iface) \
3010 ((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_shost))
3011 #else
3012 #define CARP_CHECK_WE_ARE_DST(iface) false
3013 #define CARP_CHECK_WE_ARE_SRC(iface) false
3014 #endif
3015
3016 #ifdef DEV_NETMAP
3017 #define GRAB_FOR_NETMAP(ifp, m) do { \
3018 if ((if_getcapenable(ifp) & IFCAP_NETMAP) != 0 && \
3019 ((m)->m_flags & M_BRIDGE_INJECT) == 0) { \
3020 (ifp)->if_input(ifp, m); \
3021 return (NULL); \
3022 } \
3023 } while (0)
3024 #else
3025 #define GRAB_FOR_NETMAP(ifp, m)
3026 #endif
3027
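/*
 * GRAB_OUR_PACKETS is expanded below for the bridge interface itself and,
 * when member_ifaddrs allows it, for each member: it claims frames
 * addressed to that interface's MAC (or one of its CARP addresses) for
 * local input, hands tagged frames to vlan(4) when a trunk is configured,
 * and silently drops frames whose source address shows that we
 * transmitted them ourselves.
 */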
3028 #define GRAB_OUR_PACKETS(iface) \
3029 if ((iface)->if_type == IFT_GIF) \
3030 continue; \
3031 /* It is destined for us. */ \
3032 if (memcmp(IF_LLADDR(iface), eh->ether_dhost, ETHER_ADDR_LEN) == 0 || \
3033 CARP_CHECK_WE_ARE_DST(iface)) { \
3034 if (bif->bif_flags & IFBIF_LEARNING) { \
3035 error = bridge_rtupdate(sc, eh->ether_shost, \
3036 vlan, bif, 0, IFBAF_DYNAMIC); \
3037 if (error && bif->bif_addrmax) { \
3038 m_freem(m); \
3039 return (NULL); \
3040 } \
3041 } \
3042 m->m_pkthdr.rcvif = iface; \
3043 if ((iface) == ifp) { \
3044 /* Skip bridge processing... src == dest */ \
3045 return (m); \
3046 } \
3047 /* It's passing over or to the bridge, locally. */ \
3048 ETHER_BPF_MTAP(bifp, m); \
3049 if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1); \
3050 if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);\
3051 /* Hand the packet over to netmap if necessary. */ \
3052 GRAB_FOR_NETMAP(bifp, m); \
3053 /* Filter on the physical interface. */ \
3054 if (V_pfil_local_phys && PFIL_HOOKED_IN_46) { \
3055 if (bridge_pfil(&m, NULL, ifp, \
3056 PFIL_IN) != 0 || m == NULL) { \
3057 return (NULL); \
3058 } \
3059 } \
3060 if ((iface) != bifp) \
3061 ETHER_BPF_MTAP(iface, m); \
3062 /* Pass tagged packets to if_vlan, if it's loaded */ \
3063 if (VLANTAGOF(m) != 0) { \
3064 if (bifp->if_vlantrunk == NULL) { \
3065 m_freem(m); \
3066 return (NULL); \
3067 } \
3068 (*vlan_input_p)(bifp, m); \
3069 return (NULL); \
3070 } \
3071 return (m); \
3072 } \
3073 \
3074 /* We just received a packet that we sent out. */ \
3075 if (memcmp(IF_LLADDR(iface), eh->ether_shost, ETHER_ADDR_LEN) == 0 || \
3076 CARP_CHECK_WE_ARE_SRC(iface)) { \
3077 m_freem(m); \
3078 return (NULL); \
3079 }
3080
3081 /*
3082 * Unicast. Make sure it's not for the bridge.
3083 */
3084 do { GRAB_OUR_PACKETS(bifp) } while (0);
3085
3086 /*
3087 * If member_ifaddrs is enabled, see if the packet is destined for
3088 * one of the members' addresses.
3089 */
3090 if (V_member_ifaddrs) {
3091 /* Check the interface the packet arrived on. */
3092 do { GRAB_OUR_PACKETS(ifp) } while (0);
3093
3094 CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
3095 GRAB_OUR_PACKETS(bif2->bif_ifp)
3096 }
3097 }
3098
3099 #undef CARP_CHECK_WE_ARE_DST
3100 #undef CARP_CHECK_WE_ARE_SRC
3101 #undef GRAB_FOR_NETMAP
3102 #undef GRAB_OUR_PACKETS
3103
3104 /* Perform the bridge forwarding function. */
3105 bridge_forward(sc, bif, m);
3106
3107 return (NULL);
3108 }
3109
3110 /*
3111 * Inject a packet back into the host ethernet stack. This will generally only
3112 * be used by netmap when an application writes to the host TX ring. The
3113 * M_BRIDGE_INJECT flag ensures that the packet is re-routed to the bridge
3114 * interface after ethernet processing.
3115 */
3116 static void
3117 bridge_inject(struct ifnet *ifp, struct mbuf *m)
3118 {
3119 struct bridge_softc *sc;
3120
3121 if (ifp->if_type == IFT_L2VLAN) {
3122 /*
3123 * vlan(4) gives us the vlan ifnet, so we need to get the
3124 * bridge softc to get a pointer to ether_input to send the
3125 * packet to.
3126 */
3127 struct ifnet *bifp = NULL;
3128
3129 if (vlan_trunkdev_p == NULL) {
3130 m_freem(m);
3131 return;
3132 }
3133
3134 bifp = vlan_trunkdev_p(ifp);
3135 if (bifp == NULL) {
3136 m_freem(m);
3137 return;
3138 }
3139
3140 sc = if_getsoftc(bifp);
3141 sc->sc_if_input(ifp, m);
3142 return;
3143 }
3144
3145 KASSERT((if_getcapenable(ifp) & IFCAP_NETMAP) != 0,
3146 ("%s: iface %s is not running in netmap mode",
3147 __func__, if_name(ifp)));
3148 KASSERT((m->m_flags & M_BRIDGE_INJECT) == 0,
3149 ("%s: mbuf %p has M_BRIDGE_INJECT set", __func__, m));
3150
3151 m->m_flags |= M_BRIDGE_INJECT;
3152 sc = if_getsoftc(ifp);
3153 sc->sc_if_input(ifp, m);
3154 }
3155
3156 /*
3157 * bridge_broadcast:
3158 *
3159 * Send a frame to all interfaces that are members of
3160 * the bridge, except for the one on which the packet
3161 * arrived.
3162 *
3163 * NOTE: Releases the lock on return.
3164 */
3165 static void
3166 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
3167 struct mbuf *m, int runfilt)
3168 {
3169 struct bridge_iflist *dbif, *sbif;
3170 struct mbuf *mc;
3171 struct ifnet *dst_if;
3172 int used = 0, i;
3173
3174 NET_EPOCH_ASSERT();
3175
3176 sbif = bridge_lookup_member_if(sc, src_if);
3177
3178 /* Filter on the bridge interface before broadcasting */
3179 if (runfilt && PFIL_HOOKED_OUT_46) {
3180 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
3181 return;
3182 if (m == NULL)
3183 return;
3184 }
3185
3186 CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
3187 dst_if = dbif->bif_ifp;
3188 if (dst_if == src_if)
3189 continue;
3190
3191 /* Private segments can not talk to each other */
3192 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
3193 continue;
3194
3195 if ((dbif->bif_flags & IFBIF_STP) &&
3196 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3197 continue;
3198
3199 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
3200 (m->m_flags & (M_BCAST|M_MCAST)) == 0)
3201 continue;
3202
3203 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
3204 continue;
3205
3206 if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
3207 mc = m;
3208 used = 1;
3209 } else {
3210 mc = m_dup(m, M_NOWAIT);
3211 if (mc == NULL) {
3212 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3213 continue;
3214 }
3215 }
3216
3217 /*
3218 * Filter on the output interface. Pass a NULL bridge interface
3219 * pointer so we do not redundantly filter on the bridge for
3220 * each interface we broadcast on.
3221 */
3222 if (runfilt && PFIL_HOOKED_OUT_46) {
3223 if (used == 0) {
3224 /* Keep the layer3 header aligned */
3225 i = min(mc->m_pkthdr.len, max_protohdr);
3226 mc = m_copyup(mc, i, ETHER_ALIGN);
3227 if (mc == NULL) {
3228 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3229 continue;
3230 }
3231 }
3232 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
3233 continue;
3234 if (mc == NULL)
3235 continue;
3236 }
3237
3238 bridge_enqueue(sc, dst_if, mc, dbif);
3239 }
3240 if (used == 0)
3241 m_freem(m);
3242 }
3243
3244 /*
3245 * bridge_span:
3246 *
3247 * Duplicate a packet out one or more interfaces that are in span mode,
3248 * Duplicate a packet out one or more interfaces that are in span mode;
3249 */
3250 static void
3251 bridge_span(struct bridge_softc *sc, struct mbuf *m)
3252 {
3253 struct bridge_iflist *bif;
3254 struct ifnet *dst_if;
3255 struct mbuf *mc;
3256
3257 NET_EPOCH_ASSERT();
3258
3259 if (CK_LIST_EMPTY(&sc->sc_spanlist))
3260 return;
3261
3262 CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
3263 dst_if = bif->bif_ifp;
3264
3265 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
3266 continue;
3267
3268 mc = m_dup(m, M_NOWAIT);
3269 if (mc == NULL) {
3270 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3271 continue;
3272 }
3273
3274 bridge_enqueue(sc, dst_if, mc, bif);
3275 }
3276 }
3277
3278 /*
3279 * Incoming VLAN filtering. Given a frame and the member interface it was
3280 * received on, decide whether the port configuration allows it.
3281 */
3282 static bool
3283 bridge_vfilter_in(const struct bridge_iflist *sbif, struct mbuf *m)
3284 {
3285 ether_vlanid_t vlan;
3286
3287 vlan = VLANTAGOF(m);
3288 /* Make sure the vlan id is reasonable. */
3289 if (vlan > DOT1Q_VID_MAX)
3290 return (false);
3291
3292 /*
3293 * If VLAN filtering isn't enabled, pass everything, but add a tag
3294 * if the port has a pvid configured.
3295 */
3296 if ((sbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0) {
3297 if (vlan == DOT1Q_VID_NULL &&
3298 sbif->bif_pvid != DOT1Q_VID_NULL) {
3299 m->m_pkthdr.ether_vtag = sbif->bif_pvid;
3300 m->m_flags |= M_VLANTAG;
3301 }
3302
3303 return (true);
3304 }
3305
3306 /* If Q-in-Q is disabled, check for stacked tags. */
3307 if ((sbif->bif_flags & IFBIF_QINQ) == 0) {
3308 struct ether_header *eh;
3309 uint16_t proto;
3310
3311 eh = mtod(m, struct ether_header *);
3312 proto = ntohs(eh->ether_type);
3313
3314 if (proto == ETHERTYPE_VLAN || proto == ETHERTYPE_QINQ)
3315 return (false);
3316 }
3317
3318 if (vlan == DOT1Q_VID_NULL) {
3319 /*
3320 * The frame doesn't have a tag. If the interface does not
3321 * have an untagged vlan configured, drop the frame.
3322 */
3323 if (sbif->bif_pvid == DOT1Q_VID_NULL)
3324 return (false);
3325
3326 /*
3327 * Otherwise, insert a new tag based on the interface's
3328 * untagged vlan id.
3329 */
3330 m->m_pkthdr.ether_vtag = sbif->bif_pvid;
3331 m->m_flags |= M_VLANTAG;
3332 } else {
3333 /*
3334 * The frame has a tag, so check it matches the interface's
3335 * vlan access list. We explicitly do not accept tagged
3336 * frames for the untagged vlan id here (unless it's also
3337 * in the access list).
3338 */
3339 if (!BRVLAN_TEST(&sbif->bif_vlan_set, vlan))
3340 return (false);
3341 }
3342
3343 /* Accept the frame. */
3344 return (true);
3345 }
3346
3347 /*
3348 * Outgoing VLAN filtering. Given a frame, its vlan, and the member interface
3349 * we intend to send it to, decide whether the port configuration allows it to
3350 * be sent.
3351 */
3352 static bool
3353 bridge_vfilter_out(const struct bridge_iflist *dbif, const struct mbuf *m)
3354 {
3355 struct ether_header *eh;
3356 ether_vlanid_t vlan;
3357
3358 NET_EPOCH_ASSERT();
3359
3360 /*
3361 * If the interface is in span mode, then bif_sc will be NULL.
3362 * Since the purpose of span interfaces is to receive all frames,
3363 * pass everything.
3364 */
3365 if (dbif->bif_sc == NULL)
3366 return (true);
3367
3368 /* If VLAN filtering isn't enabled, pass everything. */
3369 if ((dbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0)
3370 return (true);
3371
3372 vlan = VLANTAGOF(m);
3373
3374 /*
3375 * Always allow untagged 802.1D STP frames, even if they would
3376 * otherwise be dropped. This is required for STP to work on
3377 * a filtering bridge.
3378 *
3379 * Tagged STP (Cisco PVST+) is a non-standard extension, so
3380 * handle those frames via the normal filtering path.
3381 */
3382 eh = mtod(m, struct ether_header *);
3383 if (vlan == DOT1Q_VID_NULL &&
3384 memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0)
3385 return (true);
3386
3387 /*
3388 * If the frame wasn't assigned to a vlan at ingress, drop it.
3389 * We can't forward these frames to filtering ports because we
3390 * don't know what VLAN they're supposed to be in.
3391 */
3392 if (vlan == DOT1Q_VID_NULL)
3393 return (false);
3394
3395 /*
3396 * If the frame's vlan matches the interface's untagged vlan,
3397 * allow it.
3398 */
3399 if (vlan == dbif->bif_pvid)
3400 return (true);
3401
3402 /*
3403 * If the frame's vlan is on the interface's tagged access list,
3404 * allow it.
3405 */
3406 if (BRVLAN_TEST(&dbif->bif_vlan_set, vlan))
3407 return (true);
3408
3409 /* The frame was not permitted, so drop it. */
3410 return (false);
3411 }
3412
3413 /*
3414 * bridge_rtupdate:
3415 *
3416 * Add a bridge routing entry.
3417 */
3418 static int
3419 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
3420 ether_vlanid_t vlan, struct bridge_iflist *bif,
3421 int setflags, uint8_t flags)
3422 {
3423 struct bridge_rtnode *brt;
3424 struct bridge_iflist *obif;
3425 int error;
3426
3427 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
3428
3429 /* Check the source address is valid and not multicast. */
3430 if (ETHER_IS_MULTICAST(dst))
3431 return (EXTERROR(EINVAL, "Multicast address not permitted"));
3432 if (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
3433 dst[3] == 0 && dst[4] == 0 && dst[5] == 0)
3434 return (EXTERROR(EINVAL, "Zero address not permitted"));
3435
3436 /*
3437 * A route for this destination might already exist. If so,
3438 * update it, otherwise create a new one.
3439 */
3440 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
3441 BRIDGE_RT_LOCK(sc);
3442
3443 /* Check again, now that we have the lock. There could have
3444 * been a race and we only want to insert this once. */
3445 if (bridge_rtnode_lookup(sc, dst, vlan) != NULL) {
3446 BRIDGE_RT_UNLOCK(sc);
3447 return (0);
3448 }
3449
3450 if (sc->sc_brtcnt >= sc->sc_brtmax) {
3451 sc->sc_brtexceeded++;
3452 BRIDGE_RT_UNLOCK(sc);
3453 return (EXTERROR(ENOSPC, "Address table is full"));
3454 }
3455 /* Check per interface address limits (if enabled) */
3456 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
3457 bif->bif_addrexceeded++;
3458 BRIDGE_RT_UNLOCK(sc);
3459 return (EXTERROR(ENOSPC,
3460 "Interface address limit exceeded"));
3461 }
3462
3463 /*
3464 * Allocate a new bridge forwarding node, and
3465 * initialize the expiration time and Ethernet
3466 * address.
3467 */
3468 brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
3469 if (brt == NULL) {
3470 BRIDGE_RT_UNLOCK(sc);
3471 return (EXTERROR(ENOMEM,
3472 "Cannot allocate address node"));
3473 }
3474 brt->brt_vnet = curvnet;
3475
3476 if (bif->bif_flags & IFBIF_STICKY)
3477 brt->brt_flags = IFBAF_STICKY;
3478 else
3479 brt->brt_flags = IFBAF_DYNAMIC;
3480
3481 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
3482 brt->brt_vlan = vlan;
3483
3484 brt->brt_dst = bif;
3485 if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
3486 uma_zfree(V_bridge_rtnode_zone, brt);
3487 BRIDGE_RT_UNLOCK(sc);
3488 return (error);
3489 }
3490 bif->bif_addrcnt++;
3491
3492 BRIDGE_RT_UNLOCK(sc);
3493 }
3494
3495 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3496 (obif = brt->brt_dst) != bif) {
3497 MPASS(obif != NULL);
3498
3499 BRIDGE_RT_LOCK(sc);
3500 brt->brt_dst->bif_addrcnt--;
3501 brt->brt_dst = bif;
3502 brt->brt_dst->bif_addrcnt++;
3503 BRIDGE_RT_UNLOCK(sc);
3504
3505 if (V_log_mac_flap &&
3506 ppsratecheck(&V_log_last, &V_log_count, V_log_interval)) {
3507 log(LOG_NOTICE,
3508 "%s: mac address %6D vlan %d moved from %s to %s\n",
3509 sc->sc_ifp->if_xname,
3510 &brt->brt_addr[0], ":",
3511 brt->brt_vlan,
3512 obif->bif_ifp->if_xname,
3513 bif->bif_ifp->if_xname);
3514 }
3515 }
3516
3517 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3518 brt->brt_expire = time_uptime + sc->sc_brttimeout;
3519 if (setflags)
3520 brt->brt_flags = flags;
3521
3522 return (0);
3523 }
3524
3525 /*
3526 * bridge_rtlookup:
3527 *
3528 * Lookup the destination interface for an address.
3529 */
3530 static struct ifnet *
3531 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr,
3532 ether_vlanid_t vlan)
3533 {
3534 struct bridge_rtnode *brt;
3535
3536 NET_EPOCH_ASSERT();
3537
3538 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
3539 return (NULL);
3540
3541 return (brt->brt_ifp);
3542 }
3543
3544 /*
3545 * bridge_rttrim:
3546 *
3547 * Trim the routing table so that we have a number
3548 * of routing entries less than or equal to the
3549 * maximum number.
3550 */
3551 static void
3552 bridge_rttrim(struct bridge_softc *sc)
3553 {
3554 struct bridge_rtnode *brt, *nbrt;
3555
3556 NET_EPOCH_ASSERT();
3557 BRIDGE_RT_LOCK_ASSERT(sc);
3558
3559 /* Make sure we actually need to do this. */
3560 if (sc->sc_brtcnt <= sc->sc_brtmax)
3561 return;
3562
3563 /* Force an aging cycle; this might trim enough addresses. */
3564 bridge_rtage(sc);
3565 if (sc->sc_brtcnt <= sc->sc_brtmax)
3566 return;
3567
3568 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3569 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3570 bridge_rtnode_destroy(sc, brt);
3571 if (sc->sc_brtcnt <= sc->sc_brtmax)
3572 return;
3573 }
3574 }
3575 }
3576
3577 /*
3578 * bridge_timer:
3579 *
3580 * Aging timer for the bridge.
3581 */
3582 static void
3583 bridge_timer(void *arg)
3584 {
3585 struct bridge_softc *sc = arg;
3586
3587 BRIDGE_RT_LOCK_ASSERT(sc);
3588
3589 /* Destruction of rtnodes requires a proper vnet context */
3590 CURVNET_SET(sc->sc_ifp->if_vnet);
3591 bridge_rtage(sc);
3592
3593 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
3594 callout_reset(&sc->sc_brcallout,
3595 bridge_rtable_prune_period * hz, bridge_timer, sc);
3596 CURVNET_RESTORE();
3597 }
3598
3599 /*
3600 * bridge_rtage:
3601 *
3602 * Perform an aging cycle.
3603 */
3604 static void
3605 bridge_rtage(struct bridge_softc *sc)
3606 {
3607 struct bridge_rtnode *brt, *nbrt;
3608
3609 BRIDGE_RT_LOCK_ASSERT(sc);
3610
3611 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3612 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3613 if (time_uptime >= brt->brt_expire)
3614 bridge_rtnode_destroy(sc, brt);
3615 }
3616 }
3617 }
3618
3619 /*
3620 * bridge_rtflush:
3621 *
3622 * Remove all dynamic addresses from the bridge.
3623 */
3624 static void
3625 bridge_rtflush(struct bridge_softc *sc, int full)
3626 {
3627 struct bridge_rtnode *brt, *nbrt;
3628
3629 BRIDGE_RT_LOCK_ASSERT(sc);
3630
3631 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3632 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3633 bridge_rtnode_destroy(sc, brt);
3634 }
3635 }
3636
3637 /*
3638 * bridge_rtdaddr:
3639 *
3640 * Remove an address from the table.
3641 */
3642 static int
3643 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr,
3644 ether_vlanid_t vlan)
3645 {
3646 struct bridge_rtnode *brt;
3647 int found = 0;
3648
3649 BRIDGE_RT_LOCK(sc);
3650
3651 /*
3652 * If vlan is DOT1Q_VID_RSVD_IMPL then we want to delete for all vlans
3653 * so the lookup may return more than one.
3654 */
3655 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
3656 bridge_rtnode_destroy(sc, brt);
3657 found = 1;
3658 }
3659
3660 BRIDGE_RT_UNLOCK(sc);
3661
3662 return (found ? 0 : ENOENT);
3663 }
3664
3665 /*
3666 * bridge_rtdelete:
3667 *
3668 * Delete routes to a specific member interface.
3669 */
3670 static void
3671 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
3672 {
3673 struct bridge_rtnode *brt, *nbrt;
3674
3675 BRIDGE_RT_LOCK_ASSERT(sc);
3676
3677 CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3678 if (brt->brt_ifp == ifp && (full ||
3679 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
3680 bridge_rtnode_destroy(sc, brt);
3681 }
3682 }
3683
3684 /*
3685 * bridge_rtable_init:
3686 *
3687 * Initialize the route table for this bridge.
3688 */
3689 static void
3690 bridge_rtable_init(struct bridge_softc *sc)
3691 {
3692 int i;
3693
3694 sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
3695 M_DEVBUF, M_WAITOK);
3696
3697 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
3698 CK_LIST_INIT(&sc->sc_rthash[i]);
3699
3700 sc->sc_rthash_key = arc4random();
3701 CK_LIST_INIT(&sc->sc_rtlist);
3702 }
3703
3704 /*
3705 * bridge_rtable_fini:
3706 *
3707 * Deconstruct the route table for this bridge.
3708 */
3709 static void
3710 bridge_rtable_fini(struct bridge_softc *sc)
3711 {
3712
3713 KASSERT(sc->sc_brtcnt == 0,
3714 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
3715 free(sc->sc_rthash, M_DEVBUF);
3716 }
3717
3718 /*
3719 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
3720 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3721 */
3722 #define mix(a, b, c) \
3723 do { \
3724 a -= b; a -= c; a ^= (c >> 13); \
3725 b -= c; b -= a; b ^= (a << 8); \
3726 c -= a; c -= b; c ^= (b >> 13); \
3727 a -= b; a -= c; a ^= (c >> 12); \
3728 b -= c; b -= a; b ^= (a << 16); \
3729 c -= a; c -= b; c ^= (b >> 5); \
3730 a -= b; a -= c; a ^= (c >> 3); \
3731 b -= c; b -= a; b ^= (a << 10); \
3732 c -= a; c -= b; c ^= (b >> 15); \
3733 } while (/*CONSTCOND*/0)
3734
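/*
 * bridge_rthash() below folds the six bytes of the Ethernet address and
 * the per-bridge random key sc_rthash_key through one round of mix()
 * and masks the result down to a bucket index.  The 0x9e3779b9
 * initializers are the golden-ratio constant from Jenkins' original
 * code; seeding c with arc4random() (see bridge_rtable_init()) keeps
 * the bucket distribution from being predictable to outside hosts.
 */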
3735 static __inline uint32_t
3736 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3737 {
3738 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3739
3740 b += addr[5] << 8;
3741 b += addr[4];
3742 a += addr[3] << 24;
3743 a += addr[2] << 16;
3744 a += addr[1] << 8;
3745 a += addr[0];
3746
3747 mix(a, b, c);
3748
3749 return (c & BRIDGE_RTHASH_MASK);
3750 }
3751
3752 #undef mix
3753
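/*
 * Compare two Ethernet addresses; in effect this is
 * memcmp(a, b, ETHER_ADDR_LEN), and the sign of the result provides
 * the ordering used to keep the hash chains sorted.
 */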
3754 static int
3755 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3756 {
3757 int i, d;
3758
3759 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3760 d = ((int)a[i]) - ((int)b[i]);
3761 }
3762
3763 return (d);
3764 }
3765
3766 /*
3767 * bridge_rtnode_lookup:
3768 *
3769 * Look up a bridge route node for the specified destination. Compare the
3770 * vlan id or, if it is DOT1Q_VID_RSVD_IMPL, just return the first match.
3771 */
3772 static struct bridge_rtnode *
3773 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
3774 ether_vlanid_t vlan)
3775 {
3776 struct bridge_rtnode *brt;
3777 uint32_t hash;
3778 int dir;
3779
3780 BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);
3781
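/*
 * Hash chains are kept sorted by Ethernet address (see
 * bridge_rtnode_insert()), so the walk can stop as soon as it moves
 * past the position where the address would have to appear.
 */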
3782 hash = bridge_rthash(sc, addr);
3783 CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
3784 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3785 if (dir == 0 && (brt->brt_vlan == vlan || vlan == DOT1Q_VID_RSVD_IMPL))
3786 return (brt);
3787 if (dir > 0)
3788 return (NULL);
3789 }
3790
3791 return (NULL);
3792 }
3793
3794 /*
3795 * bridge_rtnode_insert:
3796 *
3797 * Insert the specified bridge node into the route table. We
3798 * assume the entry is not already in the table.
3799 */
3800 static int
3801 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3802 {
3803 struct bridge_rtnode *lbrt;
3804 uint32_t hash;
3805 int dir;
3806
3807 BRIDGE_RT_LOCK_ASSERT(sc);
3808
3809 hash = bridge_rthash(sc, brt->brt_addr);
3810
3811 lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
3812 if (lbrt == NULL) {
3813 CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
3814 goto out;
3815 }
3816
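/*
 * Walk the bucket, keeping it sorted by Ethernet address: insert in
 * front of the first entry that compares greater, or append at the
 * tail.  An existing entry with the same address and vlan is an error.
 */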
3817 do {
3818 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3819 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
3820 return (EXTERROR(EEXIST, "Address already exists"));
3821 if (dir > 0) {
3822 CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3823 goto out;
3824 }
3825 if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
3826 CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3827 goto out;
3828 }
3829 lbrt = CK_LIST_NEXT(lbrt, brt_hash);
3830 } while (lbrt != NULL);
3831
3832 #ifdef DIAGNOSTIC
3833 panic("bridge_rtnode_insert: impossible");
3834 #endif
3835
3836 out:
3837 CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3838 sc->sc_brtcnt++;
3839
3840 return (0);
3841 }
3842
3843 static void
3844 bridge_rtnode_destroy_cb(struct epoch_context *ctx)
3845 {
3846 struct bridge_rtnode *brt;
3847
3848 brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
3849
3850 CURVNET_SET(brt->brt_vnet);
3851 uma_zfree(V_bridge_rtnode_zone, brt);
3852 CURVNET_RESTORE();
3853 }
3854
3855 /*
3856 * bridge_rtnode_destroy:
3857 *
3858 * Destroy a bridge rtnode.
3859 */
3860 static void
3861 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3862 {
3863 BRIDGE_RT_LOCK_ASSERT(sc);
3864
3865 CK_LIST_REMOVE(brt, brt_hash);
3866
3867 CK_LIST_REMOVE(brt, brt_list);
3868 sc->sc_brtcnt--;
3869 brt->brt_dst->bif_addrcnt--;
3870
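/*
 * The node is already unlinked from both lists; defer the actual free
 * to an epoch callback so that lockless readers still walking the
 * CK_LISTs under the network epoch never see freed memory.
 */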
3871 NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
3872 }
3873
3874 /*
3875 * bridge_rtable_expire:
3876 *
3877 * Set the expiry time for all routes on an interface.
3878 */
3879 static void
3880 bridge_rtable_expire(struct ifnet *ifp, int age)
3881 {
3882 struct bridge_iflist *bif = NULL;
3883 struct bridge_softc *sc = NULL;
3884 struct bridge_rtnode *brt;
3885
3886 CURVNET_SET(ifp->if_vnet);
3887
3888 bif = ifp->if_bridge;
3889 if (bif)
3890 sc = bif->bif_sc;
3891 MPASS(sc != NULL);
3892 BRIDGE_RT_LOCK(sc);
3893
3894 /*
3895 * If the age is zero then flush, otherwise set all the expiry times to
3896 * 'age' for the interface.
3897 */
3898 if (age == 0)
3899 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3900 else {
3901 CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3902 /* Cap the expiry time to 'age' */
3903 if (brt->brt_ifp == ifp &&
3904 brt->brt_expire > time_uptime + age &&
3905 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3906 brt->brt_expire = time_uptime + age;
3907 }
3908 }
3909 BRIDGE_RT_UNLOCK(sc);
3910 CURVNET_RESTORE();
3911 }
3912
3913 /*
3914 * bridge_state_change:
3915 *
3916 * Callback from the bridgestp code when a port changes states.
3917 */
3918 static void
3919 bridge_state_change(struct ifnet *ifp, int state)
3920 {
3921 struct bridge_iflist *bif = ifp->if_bridge;
3922 struct bridge_softc *sc = bif->bif_sc;
3923 static const char *stpstates[] = {
3924 "disabled",
3925 "listening",
3926 "learning",
3927 "forwarding",
3928 "blocking",
3929 "discarding"
3930 };
3931
3932 CURVNET_SET(ifp->if_vnet);
3933 if (V_log_stp)
3934 log(LOG_NOTICE, "%s: state changed to %s on %s\n",
3935 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
3936 CURVNET_RESTORE();
3937 }
3938
3939 /*
3940 * Send bridge packets through pfil if they are one of the types pfil can deal
3941 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without
3942 * question.) If bifp or ifp is NULL then packet filtering is skipped for
3943 * that interface.
3944 */
3945 static int
3946 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
3947 {
3948 int snap, error, i;
3949 struct ether_header *eh1, eh2;
3950 struct llc llc1;
3951 u_int16_t ether_type;
3952 pfil_return_t rv;
3953 #ifdef INET
3954 struct ip *ip = NULL;
3955 int hlen = 0;
3956 #endif
3957
3958 snap = 0;
3959 error = -1; /* Default to an error unless explicitly cleared below */
3960
3961 #if 0
3962 /* we may return with the IP fields swapped, ensure it's not shared */
3963 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
3964 #endif
3965
3966 if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
3967 return (0); /* filtering is disabled */
3968
3969 i = min((*mp)->m_pkthdr.len, max_protohdr);
3970 if ((*mp)->m_len < i) {
3971 *mp = m_pullup(*mp, i);
3972 if (*mp == NULL) {
3973 printf("%s: m_pullup failed\n", __func__);
3974 return (-1);
3975 }
3976 }
3977
3978 eh1 = mtod(*mp, struct ether_header *);
3979 ether_type = ntohs(eh1->ether_type);
3980
3981 /*
3982 * Check for SNAP/LLC.
3983 */
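/*
 * A type/length value below ETHERMTU is an 802.3 length field rather
 * than an EtherType, so when an LLC/SNAP header follows, the real
 * protocol type is taken from the SNAP encapsulation instead.
 */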
3984 if (ether_type < ETHERMTU) {
3985 struct llc *llc2 = (struct llc *)(eh1 + 1);
3986
3987 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
3988 llc2->llc_dsap == LLC_SNAP_LSAP &&
3989 llc2->llc_ssap == LLC_SNAP_LSAP &&
3990 llc2->llc_control == LLC_UI) {
3991 ether_type = htons(llc2->llc_un.type_snap.ether_type);
3992 snap = 1;
3993 }
3994 }
3995
3996 /*
3997 * If we're trying to filter bridge traffic, only look at traffic for
3998 * protocols available in the kernel (IPv4 and/or IPv6) to avoid
3999 * passing traffic for an unsupported protocol to the filter. This is
4000 * lame since if we really wanted, say, an AppleTalk filter, we are
4001 * hosed, but of course we don't have an AppleTalk filter to begin
4002 * with. (Note that since pfil doesn't understand ARP it will pass
4003 * *ALL* ARP traffic.)
4004 */
4005 switch (ether_type) {
4006 #ifdef INET
4007 case ETHERTYPE_ARP:
4008 case ETHERTYPE_REVARP:
4009 if (V_pfil_ipfw_arp == 0)
4010 return (0); /* Automatically pass */
4011
4012 /* FALLTHROUGH */
4013 case ETHERTYPE_IP:
4014 #endif
4015 #ifdef INET6
4016 case ETHERTYPE_IPV6:
4017 #endif /* INET6 */
4018 break;
4019
4020 default:
4021 /*
4022 * We get here if the packet isn't from a supported
4023 * protocol. Check to see if the user wants to pass
4024 * non-IP packets; these will not be checked by pfil(9)
4025 * and would be passed unconditionally, so the default is to
4026 * drop.
4027 */
4028 if (V_pfil_onlyip)
4029 goto bad;
4030 }
4031
4032 /* Run the packet through pfil before stripping link headers */
4033 if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
4034 dir == PFIL_OUT && ifp != NULL) {
4035 switch (pfil_mbuf_out(V_link_pfil_head, mp, ifp, NULL)) {
4036 case PFIL_DROPPED:
4037 return (EACCES);
4038 case PFIL_CONSUMED:
4039 return (0);
4040 }
4041 }
4042
4043 /* Strip off the Ethernet header and keep a copy. */
4044 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
4045 m_adj(*mp, ETHER_HDR_LEN);
4046
4047 /* Strip off snap header, if present */
4048 if (snap) {
4049 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
4050 m_adj(*mp, sizeof(struct llc));
4051 }
4052
4053 /*
4054 * Check the IP header for alignment and errors
4055 */
4056 if (dir == PFIL_IN) {
4057 switch (ether_type) {
4058 #ifdef INET
4059 case ETHERTYPE_IP:
4060 error = bridge_ip_checkbasic(mp);
4061 break;
4062 #endif
4063 #ifdef INET6
4064 case ETHERTYPE_IPV6:
4065 error = bridge_ip6_checkbasic(mp);
4066 break;
4067 #endif /* INET6 */
4068 default:
4069 error = 0;
4070 }
4071 if (error)
4072 goto bad;
4073 }
4074
4075 error = 0;
4076
4077 /*
4078 * Run the packet through pfil
4079 */
4080 rv = PFIL_PASS;
4081 switch (ether_type) {
4082 #ifdef INET
4083 case ETHERTYPE_IP:
4084 /*
4085 * Run pfil on the member interface and the bridge, both can
4086 * be skipped by clearing pfil_member or pfil_bridge.
4087 *
4088 * Keep the order:
4089 * in_if -> bridge_if -> out_if
4090 */
4091 if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
4092 pfil_mbuf_out(V_inet_pfil_head, mp, bifp, NULL)) !=
4093 PFIL_PASS)
4094 break;
4095
4096 if (V_pfil_member && ifp != NULL) {
4097 rv = (dir == PFIL_OUT) ?
4098 pfil_mbuf_out(V_inet_pfil_head, mp, ifp, NULL) :
4099 pfil_mbuf_in(V_inet_pfil_head, mp, ifp, NULL);
4100 if (rv != PFIL_PASS)
4101 break;
4102 }
4103
4104 if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
4105 pfil_mbuf_in(V_inet_pfil_head, mp, bifp, NULL)) !=
4106 PFIL_PASS)
4107 break;
4108
4109 /* check if we need to fragment the packet */
4110 /* bridge_fragment generates an mbuf chain of packets */
4111 /* that already include Ethernet headers */
4112 if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
4113 i = (*mp)->m_pkthdr.len;
4114 if (i > ifp->if_mtu) {
4115 error = bridge_fragment(ifp, mp, &eh2, snap,
4116 &llc1);
4117 return (error);
4118 }
4119 }
4120
4121 /* Recalculate the ip checksum. */
4122 ip = mtod(*mp, struct ip *);
4123 hlen = ip->ip_hl << 2;
4124 if (hlen < sizeof(struct ip))
4125 goto bad;
4126 if (hlen > (*mp)->m_len) {
4127 if ((*mp = m_pullup(*mp, hlen)) == NULL)
4128 goto bad;
4129 ip = mtod(*mp, struct ip *);
4130 if (ip == NULL)
4131 goto bad;
4132 }
4133 ip->ip_sum = 0;
4134 if (hlen == sizeof(struct ip))
4135 ip->ip_sum = in_cksum_hdr(ip);
4136 else
4137 ip->ip_sum = in_cksum(*mp, hlen);
4138
4139 break;
4140 #endif /* INET */
4141 #ifdef INET6
4142 case ETHERTYPE_IPV6:
4143 if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
4144 pfil_mbuf_out(V_inet6_pfil_head, mp, bifp, NULL)) !=
4145 PFIL_PASS)
4146 break;
4147
4148 if (V_pfil_member && ifp != NULL) {
4149 rv = (dir == PFIL_OUT) ?
4150 pfil_mbuf_out(V_inet6_pfil_head, mp, ifp, NULL) :
4151 pfil_mbuf_in(V_inet6_pfil_head, mp, ifp, NULL);
4152 if (rv != PFIL_PASS)
4153 break;
4154 }
4155
4156 if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
4157 pfil_mbuf_in(V_inet6_pfil_head, mp, bifp, NULL)) !=
4158 PFIL_PASS)
4159 break;
4160 break;
4161 #endif
4162 }
4163
4164 switch (rv) {
4165 case PFIL_CONSUMED:
4166 return (0);
4167 case PFIL_DROPPED:
4168 return (EACCES);
4169 default:
4170 break;
4171 }
4172
4173 error = -1;
4174
4175 /*
4176 * Finally, put everything back the way it was and return
4177 */
4178 if (snap) {
4179 M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
4180 if (*mp == NULL)
4181 return (error);
4182 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
4183 }
4184
4185 M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
4186 if (*mp == NULL)
4187 return (error);
4188 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
4189
4190 return (0);
4191
4192 bad:
4193 m_freem(*mp);
4194 *mp = NULL;
4195 return (error);
4196 }
4197
4198 #ifdef INET
4199 /*
4200 * Perform basic sanity checks on the IP header since
4201 * pfil assumes ip_input has already processed
4202 * it. Cut-and-pasted from ip_input.c.
4203 * Given how simple the IPv6 version is,
4204 * does the IPv4 version really need to be
4205 * this complicated?
4206 *
4207 * XXX Should we update ipstat here, or not?
4208 * XXX Right now we update ipstat but not
4209 * XXX csum_counter.
4210 */
4211 static int
4212 bridge_ip_checkbasic(struct mbuf **mp)
4213 {
4214 struct mbuf *m = *mp;
4215 struct ip *ip;
4216 int len, hlen;
4217 u_short sum;
4218
4219 if (*mp == NULL)
4220 return (-1);
4221
4222 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4223 if ((m = m_copyup(m, sizeof(struct ip),
4224 (max_linkhdr + 3) & ~3)) == NULL) {
4225 /* XXXJRT new stat, please */
4226 KMOD_IPSTAT_INC(ips_toosmall);
4227 goto bad;
4228 }
4229 } else if (__predict_false(m->m_len < sizeof (struct ip))) {
4230 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
4231 KMOD_IPSTAT_INC(ips_toosmall);
4232 goto bad;
4233 }
4234 }
4235 ip = mtod(m, struct ip *);
4236 if (ip == NULL) goto bad;
4237
4238 if (ip->ip_v != IPVERSION) {
4239 KMOD_IPSTAT_INC(ips_badvers);
4240 goto bad;
4241 }
4242 hlen = ip->ip_hl << 2;
4243 if (hlen < sizeof(struct ip)) { /* minimum header length */
4244 KMOD_IPSTAT_INC(ips_badhlen);
4245 goto bad;
4246 }
4247 if (hlen > m->m_len) {
4248 if ((m = m_pullup(m, hlen)) == NULL) {
4249 KMOD_IPSTAT_INC(ips_badhlen);
4250 goto bad;
4251 }
4252 ip = mtod(m, struct ip *);
4253 if (ip == NULL) goto bad;
4254 }
4255
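/*
 * If the interface has already verified the IP header checksum for us
 * (CSUM_IP_CHECKED), trust that result: sum ends up nonzero only when
 * CSUM_IP_VALID is not also set.  Otherwise verify it in software.
 */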
4256 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
4257 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
4258 } else {
4259 if (hlen == sizeof(struct ip)) {
4260 sum = in_cksum_hdr(ip);
4261 } else {
4262 sum = in_cksum(m, hlen);
4263 }
4264 }
4265 if (sum) {
4266 KMOD_IPSTAT_INC(ips_badsum);
4267 goto bad;
4268 }
4269
4270 /* Retrieve the packet length. */
4271 len = ntohs(ip->ip_len);
4272
4273 /*
4274 * Check for additional length bogosity
4275 */
4276 if (len < hlen) {
4277 KMOD_IPSTAT_INC(ips_badlen);
4278 goto bad;
4279 }
4280
4281 /*
4282 * Check that the amount of data in the buffers
4283 * is at least as much as the IP header would have us expect.
4284 * Drop packet if shorter than we expect.
4285 */
4286 if (m->m_pkthdr.len < len) {
4287 KMOD_IPSTAT_INC(ips_tooshort);
4288 goto bad;
4289 }
4290
4291 /* Checks out, proceed */
4292 *mp = m;
4293 return (0);
4294
4295 bad:
4296 *mp = m;
4297 return (-1);
4298 }
4299 #endif /* INET */
4300
4301 #ifdef INET6
4302 /*
4303 * Same as above, but for IPv6.
4304 * Cut-and-pasted from ip6_input.c.
4305 * XXX Should we update ip6stat, or not?
4306 */
4307 static int
4308 bridge_ip6_checkbasic(struct mbuf **mp)
4309 {
4310 struct mbuf *m = *mp;
4311 struct ip6_hdr *ip6;
4312
4313 /*
4314 * If the IPv6 header is not aligned, slurp it up into a new
4315 * mbuf with space for link headers, in the event we forward
4316 * it. Otherwise, if it is aligned, make sure the entire base
4317 * IPv6 header is in the first mbuf of the chain.
4318 */
4319 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
4320 struct ifnet *inifp = m->m_pkthdr.rcvif;
4321 if ((m = m_copyup(m, sizeof(struct ip6_hdr),
4322 (max_linkhdr + 3) & ~3)) == NULL) {
4323 /* XXXJRT new stat, please */
4324 IP6STAT_INC(ip6s_toosmall);
4325 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4326 goto bad;
4327 }
4328 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
4329 struct ifnet *inifp = m->m_pkthdr.rcvif;
4330 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
4331 IP6STAT_INC(ip6s_toosmall);
4332 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
4333 goto bad;
4334 }
4335 }
4336
4337 ip6 = mtod(m, struct ip6_hdr *);
4338
4339 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
4340 IP6STAT_INC(ip6s_badvers);
4341 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
4342 goto bad;
4343 }
4344
4345 /* Checks out, proceed */
4346 *mp = m;
4347 return (0);
4348
4349 bad:
4350 *mp = m;
4351 return (-1);
4352 }
4353 #endif /* INET6 */
4354
4355 #ifdef INET
4356 /*
4357 * bridge_fragment:
4358 *
4359 * Fragment the mbuf chain into multiple packets and prepend the Ethernet header to each.
4360 */
4361 static int
4362 bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
4363 int snap, struct llc *llc)
4364 {
4365 struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
4366 struct ip *ip;
4367 int error = -1;
4368
4369 if (m->m_len < sizeof(struct ip) &&
4370 (m = m_pullup(m, sizeof(struct ip))) == NULL)
4371 goto dropit;
4372 ip = mtod(m, struct ip *);
4373
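/*
 * Request recomputation of the IP header checksum for the fragments;
 * ip_fragment() computes it in software for any fragment the hardware
 * (per ifp->if_hwassist) will not cover.
 */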
4374 m->m_pkthdr.csum_flags |= CSUM_IP;
4375 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
4376 if (error)
4377 goto dropit;
4378
4379 /*
4380 * Walk the chain and re-add the Ethernet header for
4381 * each mbuf packet.
4382 */
4383 for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
4384 nextpkt = mcur->m_nextpkt;
4385 mcur->m_nextpkt = NULL;
4386 if (snap) {
4387 M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
4388 if (mcur == NULL) {
4389 error = ENOBUFS;
4390 if (mprev != NULL)
4391 mprev->m_nextpkt = nextpkt;
4392 goto dropit;
4393 }
4394 bcopy(llc, mtod(mcur, caddr_t),sizeof(struct llc));
4395 }
4396
4397 M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
4398 if (mcur == NULL) {
4399 error = ENOBUFS;
4400 if (mprev != NULL)
4401 mprev->m_nextpkt = nextpkt;
4402 goto dropit;
4403 }
4404 bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);
4405
4406 /*
4407 * The previous two M_PREPEND could have inserted one or two
4408 * mbufs in front so we have to update the previous packet's
4409 * m_nextpkt.
4410 */
4411 mcur->m_nextpkt = nextpkt;
4412 if (mprev != NULL)
4413 mprev->m_nextpkt = mcur;
4414 else {
4415 /* The first mbuf in the original chain needs to be
4416 * updated. */
4417 *mp = mcur;
4418 }
4419 mprev = mcur;
4420 }
4421
4422 KMOD_IPSTAT_INC(ips_fragmented);
4423 return (error);
4424
4425 dropit:
4426 for (mcur = *mp; mcur; mcur = m) { /* dropping the full packet chain */
4427 m = mcur->m_nextpkt;
4428 m_freem(mcur);
4429 }
4430 return (error);
4431 }
4432 #endif /* INET */
4433
4434 static void
4435 bridge_linkstate(struct ifnet *ifp)
4436 {
4437 struct bridge_softc *sc = NULL;
4438 struct bridge_iflist *bif;
4439 struct epoch_tracker et;
4440
4441 NET_EPOCH_ENTER(et);
4442
4443 bif = ifp->if_bridge;
4444 if (bif)
4445 sc = bif->bif_sc;
4446
4447 if (sc != NULL) {
4448 bridge_linkcheck(sc);
4449 bstp_linkstate(&bif->bif_stp);
4450 }
4451
4452 NET_EPOCH_EXIT(et);
4453 }
4454
4455 static void
4456 bridge_linkcheck(struct bridge_softc *sc)
4457 {
4458 struct bridge_iflist *bif;
4459 int new_link, hasls;
4460
4461 BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
4462
4463 new_link = LINK_STATE_DOWN;
4464 hasls = 0;
4465 /* Our link is considered up if at least one of our ports is active */
4466 CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
4467 if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
4468 hasls++;
4469 if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
4470 new_link = LINK_STATE_UP;
4471 break;
4472 }
4473 }
4474 if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
4475 /* If no interfaces support link-state then we default to up */
4476 new_link = LINK_STATE_UP;
4477 }
4478 if_link_state_change(sc->sc_ifp, new_link);
4479 }
4480