1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net) 40 * All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 53 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 54 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 55 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 56 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 57 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 59 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 60 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 61 * POSSIBILITY OF SUCH DAMAGE. 62 * 63 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp 64 */ 65 66 /* 67 * Network interface bridge support. 68 * 69 * TODO: 70 * 71 * - Currently only supports Ethernet-like interfaces (Ethernet, 72 * 802.11, VLANs on Ethernet, etc.) 
Figure out a nice way 73 * to bridge other types of interfaces (FDDI-FDDI, and maybe 74 * consider heterogeneous bridges). 75 */ 76 77 #include <sys/cdefs.h> 78 __FBSDID("$FreeBSD$"); 79 80 #include "opt_inet.h" 81 #include "opt_inet6.h" 82 83 #include <sys/param.h> 84 #include <sys/eventhandler.h> 85 #include <sys/mbuf.h> 86 #include <sys/malloc.h> 87 #include <sys/protosw.h> 88 #include <sys/systm.h> 89 #include <sys/jail.h> 90 #include <sys/time.h> 91 #include <sys/socket.h> /* for net/if.h */ 92 #include <sys/sockio.h> 93 #include <sys/ctype.h> /* string functions */ 94 #include <sys/kernel.h> 95 #include <sys/random.h> 96 #include <sys/syslog.h> 97 #include <sys/sysctl.h> 98 #include <vm/uma.h> 99 #include <sys/module.h> 100 #include <sys/priv.h> 101 #include <sys/proc.h> 102 #include <sys/lock.h> 103 #include <sys/mutex.h> 104 105 #include <net/bpf.h> 106 #include <net/if.h> 107 #include <net/if_clone.h> 108 #include <net/if_dl.h> 109 #include <net/if_types.h> 110 #include <net/if_var.h> 111 #include <net/pfil.h> 112 #include <net/vnet.h> 113 114 #include <netinet/in.h> 115 #include <netinet/in_systm.h> 116 #include <netinet/in_var.h> 117 #include <netinet/ip.h> 118 #include <netinet/ip_var.h> 119 #ifdef INET6 120 #include <netinet/ip6.h> 121 #include <netinet6/ip6_var.h> 122 #include <netinet6/in6_ifattach.h> 123 #endif 124 #if defined(INET) || defined(INET6) 125 #include <netinet/ip_carp.h> 126 #endif 127 #include <machine/in_cksum.h> 128 #include <netinet/if_ether.h> 129 #include <net/bridgestp.h> 130 #include <net/if_bridgevar.h> 131 #include <net/if_llc.h> 132 #include <net/if_vlan_var.h> 133 134 #include <net/route.h> 135 136 /* 137 * Size of the route hash table. Must be a power of two. 138 */ 139 #ifndef BRIDGE_RTHASH_SIZE 140 #define BRIDGE_RTHASH_SIZE 1024 141 #endif 142 143 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1) 144 145 /* 146 * Default maximum number of addresses to cache. 147 */ 148 #ifndef BRIDGE_RTABLE_MAX 149 #define BRIDGE_RTABLE_MAX 2000 150 #endif 151 152 /* 153 * Timeout (in seconds) for entries learned dynamically. 154 */ 155 #ifndef BRIDGE_RTABLE_TIMEOUT 156 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */ 157 #endif 158 159 /* 160 * Number of seconds between walks of the route list. 161 */ 162 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD 163 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60) 164 #endif 165 166 /* 167 * List of capabilities to possibly mask on the member interface. 168 */ 169 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\ 170 IFCAP_TXCSUM_IPV6) 171 172 /* 173 * List of capabilities to strip 174 */ 175 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO 176 177 /* 178 * Bridge interface list entry. 179 */ 180 struct bridge_iflist { 181 LIST_ENTRY(bridge_iflist) bif_next; 182 struct ifnet *bif_ifp; /* member if */ 183 struct bstp_port bif_stp; /* STP state */ 184 uint32_t bif_flags; /* member if flags */ 185 int bif_savedcaps; /* saved capabilities */ 186 uint32_t bif_addrmax; /* max # of addresses */ 187 uint32_t bif_addrcnt; /* cur. # of addresses */ 188 uint32_t bif_addrexceeded;/* # of address violations */ 189 }; 190 191 /* 192 * Bridge route node. 
193 */ 194 struct bridge_rtnode { 195 LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */ 196 LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */ 197 struct bridge_iflist *brt_dst; /* destination if */ 198 unsigned long brt_expire; /* expiration time */ 199 uint8_t brt_flags; /* address flags */ 200 uint8_t brt_addr[ETHER_ADDR_LEN]; 201 uint16_t brt_vlan; /* vlan id */ 202 }; 203 #define brt_ifp brt_dst->bif_ifp 204 205 /* 206 * Software state for each bridge. 207 */ 208 struct bridge_softc { 209 struct ifnet *sc_ifp; /* make this an interface */ 210 LIST_ENTRY(bridge_softc) sc_list; 211 struct mtx sc_mtx; 212 struct cv sc_cv; 213 uint32_t sc_brtmax; /* max # of addresses */ 214 uint32_t sc_brtcnt; /* cur. # of addresses */ 215 uint32_t sc_brttimeout; /* rt timeout in seconds */ 216 struct callout sc_brcallout; /* bridge callout */ 217 uint32_t sc_iflist_ref; /* refcount for sc_iflist */ 218 uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */ 219 LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */ 220 LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */ 221 LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */ 222 uint32_t sc_rthash_key; /* key for hash */ 223 LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */ 224 struct bstp_state sc_stp; /* STP state */ 225 uint32_t sc_brtexceeded; /* # of cache drops */ 226 struct ifnet *sc_ifaddr; /* member mac copied from */ 227 u_char sc_defaddr[6]; /* Default MAC address */ 228 }; 229 230 static VNET_DEFINE(struct mtx, bridge_list_mtx); 231 #define V_bridge_list_mtx VNET(bridge_list_mtx) 232 static eventhandler_tag bridge_detach_cookie; 233 234 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; 235 236 uma_zone_t bridge_rtnode_zone; 237 238 static int bridge_clone_create(struct if_clone *, int, caddr_t); 239 static void bridge_clone_destroy(struct ifnet *); 240 241 static int bridge_ioctl(struct ifnet *, u_long, caddr_t); 242 static void bridge_mutecaps(struct bridge_softc *); 243 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *, 244 int); 245 static void bridge_ifdetach(void *arg __unused, struct ifnet *); 246 static void bridge_init(void *); 247 static void bridge_dummynet(struct mbuf *, struct ifnet *); 248 static void bridge_stop(struct ifnet *, int); 249 static int bridge_transmit(struct ifnet *, struct mbuf *); 250 static void bridge_qflush(struct ifnet *); 251 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *); 252 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *, 253 struct rtentry *); 254 static int bridge_enqueue(struct bridge_softc *, struct ifnet *, 255 struct mbuf *); 256 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int); 257 258 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *, 259 struct mbuf *m); 260 261 static void bridge_timer(void *); 262 263 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, 264 struct mbuf *, int); 265 static void bridge_span(struct bridge_softc *, struct mbuf *); 266 267 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, 268 uint16_t, struct bridge_iflist *, int, uint8_t); 269 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *, 270 uint16_t); 271 static void bridge_rttrim(struct bridge_softc *); 272 static void bridge_rtage(struct bridge_softc *); 273 static void bridge_rtflush(struct bridge_softc *, int); 274 static int bridge_rtdaddr(struct bridge_softc *, const 
uint8_t *, 275 uint16_t); 276 277 static void bridge_rtable_init(struct bridge_softc *); 278 static void bridge_rtable_fini(struct bridge_softc *); 279 280 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *); 281 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *, 282 const uint8_t *, uint16_t); 283 static int bridge_rtnode_insert(struct bridge_softc *, 284 struct bridge_rtnode *); 285 static void bridge_rtnode_destroy(struct bridge_softc *, 286 struct bridge_rtnode *); 287 static void bridge_rtable_expire(struct ifnet *, int); 288 static void bridge_state_change(struct ifnet *, int); 289 290 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *, 291 const char *name); 292 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *, 293 struct ifnet *ifp); 294 static void bridge_delete_member(struct bridge_softc *, 295 struct bridge_iflist *, int); 296 static void bridge_delete_span(struct bridge_softc *, 297 struct bridge_iflist *); 298 299 static int bridge_ioctl_add(struct bridge_softc *, void *); 300 static int bridge_ioctl_del(struct bridge_softc *, void *); 301 static int bridge_ioctl_gifflags(struct bridge_softc *, void *); 302 static int bridge_ioctl_sifflags(struct bridge_softc *, void *); 303 static int bridge_ioctl_scache(struct bridge_softc *, void *); 304 static int bridge_ioctl_gcache(struct bridge_softc *, void *); 305 static int bridge_ioctl_gifs(struct bridge_softc *, void *); 306 static int bridge_ioctl_rts(struct bridge_softc *, void *); 307 static int bridge_ioctl_saddr(struct bridge_softc *, void *); 308 static int bridge_ioctl_sto(struct bridge_softc *, void *); 309 static int bridge_ioctl_gto(struct bridge_softc *, void *); 310 static int bridge_ioctl_daddr(struct bridge_softc *, void *); 311 static int bridge_ioctl_flush(struct bridge_softc *, void *); 312 static int bridge_ioctl_gpri(struct bridge_softc *, void *); 313 static int bridge_ioctl_spri(struct bridge_softc *, void *); 314 static int bridge_ioctl_ght(struct bridge_softc *, void *); 315 static int bridge_ioctl_sht(struct bridge_softc *, void *); 316 static int bridge_ioctl_gfd(struct bridge_softc *, void *); 317 static int bridge_ioctl_sfd(struct bridge_softc *, void *); 318 static int bridge_ioctl_gma(struct bridge_softc *, void *); 319 static int bridge_ioctl_sma(struct bridge_softc *, void *); 320 static int bridge_ioctl_sifprio(struct bridge_softc *, void *); 321 static int bridge_ioctl_sifcost(struct bridge_softc *, void *); 322 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *); 323 static int bridge_ioctl_addspan(struct bridge_softc *, void *); 324 static int bridge_ioctl_delspan(struct bridge_softc *, void *); 325 static int bridge_ioctl_gbparam(struct bridge_softc *, void *); 326 static int bridge_ioctl_grte(struct bridge_softc *, void *); 327 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *); 328 static int bridge_ioctl_sproto(struct bridge_softc *, void *); 329 static int bridge_ioctl_stxhc(struct bridge_softc *, void *); 330 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, 331 int); 332 static int bridge_ip_checkbasic(struct mbuf **mp); 333 #ifdef INET6 334 static int bridge_ip6_checkbasic(struct mbuf **mp); 335 #endif /* INET6 */ 336 static int bridge_fragment(struct ifnet *, struct mbuf **mp, 337 struct ether_header *, int, struct llc *); 338 static void bridge_linkstate(struct ifnet *ifp); 339 static void bridge_linkcheck(struct bridge_softc *sc); 340 341 extern void 
(*bridge_linkstate_p)(struct ifnet *ifp); 342 343 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */ 344 #define VLANTAGOF(_m) \ 345 (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1 346 347 static struct bstp_cb_ops bridge_ops = { 348 .bcb_state = bridge_state_change, 349 .bcb_rtage = bridge_rtable_expire 350 }; 351 352 SYSCTL_DECL(_net_link); 353 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge"); 354 355 /* only pass IP[46] packets when pfil is enabled */ 356 static VNET_DEFINE(int, pfil_onlyip) = 1; 357 #define V_pfil_onlyip VNET(pfil_onlyip) 358 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, 359 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0, 360 "Only pass IP packets when pfil is enabled"); 361 362 /* run pfil hooks on the bridge interface */ 363 static VNET_DEFINE(int, pfil_bridge) = 1; 364 #define V_pfil_bridge VNET(pfil_bridge) 365 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, 366 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0, 367 "Packet filter on the bridge interface"); 368 369 /* layer2 filter with ipfw */ 370 static VNET_DEFINE(int, pfil_ipfw); 371 #define V_pfil_ipfw VNET(pfil_ipfw) 372 373 /* layer2 ARP filter with ipfw */ 374 static VNET_DEFINE(int, pfil_ipfw_arp); 375 #define V_pfil_ipfw_arp VNET(pfil_ipfw_arp) 376 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, 377 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0, 378 "Filter ARP packets through IPFW layer2"); 379 380 /* run pfil hooks on the member interface */ 381 static VNET_DEFINE(int, pfil_member) = 1; 382 #define V_pfil_member VNET(pfil_member) 383 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, 384 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0, 385 "Packet filter on the member interface"); 386 387 /* run pfil hooks on the physical interface for locally destined packets */ 388 static VNET_DEFINE(int, pfil_local_phys); 389 #define V_pfil_local_phys VNET(pfil_local_phys) 390 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, 391 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0, 392 "Packet filter on the physical interface for locally destined packets"); 393 394 /* log STP state changes */ 395 static VNET_DEFINE(int, log_stp); 396 #define V_log_stp VNET(log_stp) 397 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, 398 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0, 399 "Log STP state changes"); 400 401 /* share MAC with first bridge member */ 402 static VNET_DEFINE(int, bridge_inherit_mac); 403 #define V_bridge_inherit_mac VNET(bridge_inherit_mac) 404 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, 405 CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0, 406 "Inherit MAC address from the first bridge member"); 407 408 static VNET_DEFINE(int, allow_llz_overlap) = 0; 409 #define V_allow_llz_overlap VNET(allow_llz_overlap) 410 SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap, 411 CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0, 412 "Allow overlap of link-local scope " 413 "zones of a bridge interface and the member interfaces"); 414 415 struct bridge_control { 416 int (*bc_func)(struct bridge_softc *, void *); 417 int bc_argsize; 418 int bc_flags; 419 }; 420 421 #define BC_F_COPYIN 0x01 /* copy arguments in */ 422 #define BC_F_COPYOUT 0x02 /* copy arguments out */ 423 #define BC_F_SUSER 0x04 /* do super-user check */ 424 425 const struct bridge_control bridge_control_table[] = { 426 { bridge_ioctl_add, sizeof(struct ifbreq), 427 BC_F_COPYIN|BC_F_SUSER 
}, 428 { bridge_ioctl_del, sizeof(struct ifbreq), 429 BC_F_COPYIN|BC_F_SUSER }, 430 431 { bridge_ioctl_gifflags, sizeof(struct ifbreq), 432 BC_F_COPYIN|BC_F_COPYOUT }, 433 { bridge_ioctl_sifflags, sizeof(struct ifbreq), 434 BC_F_COPYIN|BC_F_SUSER }, 435 436 { bridge_ioctl_scache, sizeof(struct ifbrparam), 437 BC_F_COPYIN|BC_F_SUSER }, 438 { bridge_ioctl_gcache, sizeof(struct ifbrparam), 439 BC_F_COPYOUT }, 440 441 { bridge_ioctl_gifs, sizeof(struct ifbifconf), 442 BC_F_COPYIN|BC_F_COPYOUT }, 443 { bridge_ioctl_rts, sizeof(struct ifbaconf), 444 BC_F_COPYIN|BC_F_COPYOUT }, 445 446 { bridge_ioctl_saddr, sizeof(struct ifbareq), 447 BC_F_COPYIN|BC_F_SUSER }, 448 449 { bridge_ioctl_sto, sizeof(struct ifbrparam), 450 BC_F_COPYIN|BC_F_SUSER }, 451 { bridge_ioctl_gto, sizeof(struct ifbrparam), 452 BC_F_COPYOUT }, 453 454 { bridge_ioctl_daddr, sizeof(struct ifbareq), 455 BC_F_COPYIN|BC_F_SUSER }, 456 457 { bridge_ioctl_flush, sizeof(struct ifbreq), 458 BC_F_COPYIN|BC_F_SUSER }, 459 460 { bridge_ioctl_gpri, sizeof(struct ifbrparam), 461 BC_F_COPYOUT }, 462 { bridge_ioctl_spri, sizeof(struct ifbrparam), 463 BC_F_COPYIN|BC_F_SUSER }, 464 465 { bridge_ioctl_ght, sizeof(struct ifbrparam), 466 BC_F_COPYOUT }, 467 { bridge_ioctl_sht, sizeof(struct ifbrparam), 468 BC_F_COPYIN|BC_F_SUSER }, 469 470 { bridge_ioctl_gfd, sizeof(struct ifbrparam), 471 BC_F_COPYOUT }, 472 { bridge_ioctl_sfd, sizeof(struct ifbrparam), 473 BC_F_COPYIN|BC_F_SUSER }, 474 475 { bridge_ioctl_gma, sizeof(struct ifbrparam), 476 BC_F_COPYOUT }, 477 { bridge_ioctl_sma, sizeof(struct ifbrparam), 478 BC_F_COPYIN|BC_F_SUSER }, 479 480 { bridge_ioctl_sifprio, sizeof(struct ifbreq), 481 BC_F_COPYIN|BC_F_SUSER }, 482 483 { bridge_ioctl_sifcost, sizeof(struct ifbreq), 484 BC_F_COPYIN|BC_F_SUSER }, 485 486 { bridge_ioctl_addspan, sizeof(struct ifbreq), 487 BC_F_COPYIN|BC_F_SUSER }, 488 { bridge_ioctl_delspan, sizeof(struct ifbreq), 489 BC_F_COPYIN|BC_F_SUSER }, 490 491 { bridge_ioctl_gbparam, sizeof(struct ifbropreq), 492 BC_F_COPYOUT }, 493 494 { bridge_ioctl_grte, sizeof(struct ifbrparam), 495 BC_F_COPYOUT }, 496 497 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf), 498 BC_F_COPYIN|BC_F_COPYOUT }, 499 500 { bridge_ioctl_sproto, sizeof(struct ifbrparam), 501 BC_F_COPYIN|BC_F_SUSER }, 502 503 { bridge_ioctl_stxhc, sizeof(struct ifbrparam), 504 BC_F_COPYIN|BC_F_SUSER }, 505 506 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq), 507 BC_F_COPYIN|BC_F_SUSER }, 508 509 }; 510 const int bridge_control_table_size = nitems(bridge_control_table); 511 512 static VNET_DEFINE(LIST_HEAD(, bridge_softc), bridge_list); 513 #define V_bridge_list VNET(bridge_list) 514 #define BRIDGE_LIST_LOCK_INIT(x) mtx_init(&V_bridge_list_mtx, \ 515 "if_bridge list", NULL, MTX_DEF) 516 #define BRIDGE_LIST_LOCK_DESTROY(x) mtx_destroy(&V_bridge_list_mtx) 517 #define BRIDGE_LIST_LOCK(x) mtx_lock(&V_bridge_list_mtx) 518 #define BRIDGE_LIST_UNLOCK(x) mtx_unlock(&V_bridge_list_mtx) 519 520 static VNET_DEFINE(struct if_clone *, bridge_cloner); 521 #define V_bridge_cloner VNET(bridge_cloner) 522 523 static const char bridge_name[] = "bridge"; 524 525 static void 526 vnet_bridge_init(const void *unused __unused) 527 { 528 529 BRIDGE_LIST_LOCK_INIT(); 530 LIST_INIT(&V_bridge_list); 531 V_bridge_cloner = if_clone_simple(bridge_name, 532 bridge_clone_create, bridge_clone_destroy, 0); 533 } 534 VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY, 535 vnet_bridge_init, NULL); 536 537 static void 538 vnet_bridge_uninit(const void *unused __unused) 539 { 540 541 
	if_clone_detach(V_bridge_cloner);
	V_bridge_cloner = NULL;
	BRIDGE_LIST_LOCK_DESTROY();
}
VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
    vnet_bridge_uninit, NULL);

static int
bridge_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
		    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		bridge_input_p = bridge_input;
		bridge_output_p = bridge_output;
		bridge_dn_p = bridge_dummynet;
		bridge_linkstate_p = bridge_linkstate;
		bridge_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, bridge_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    bridge_detach_cookie);
		uma_zdestroy(bridge_rtnode_zone);
		bridge_input_p = NULL;
		bridge_output_p = NULL;
		bridge_dn_p = NULL;
		bridge_linkstate_p = NULL;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t bridge_mod = {
	"if_bridge",
	bridge_modevent,
	0
};

DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
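/*
 * Illustrative sketch (paraphrased, not code from this file): the pointers
 * assigned at MOD_LOAD above are how the generic Ethernet layer hands
 * traffic to this module.  ether_input() does roughly
 *
 *	if (ifp->if_bridge != NULL && bridge_input_p != NULL)
 *		m = (*bridge_input_p)(ifp, m);	(may consume the mbuf)
 *
 * and ether_output() and dummynet use bridge_output_p and bridge_dn_p
 * similarly, so the bridge only sees frames on interfaces whose if_bridge
 * pointer was set when they were added as members.
 */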
/*
 * handler for net.link.bridge.ipfw
 */
static int
sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
{
	int enable = V_pfil_ipfw;
	int error;

	error = sysctl_handle_int(oidp, &enable, 0, req);
	enable &= 1;

	if (enable != V_pfil_ipfw) {
		V_pfil_ipfw = enable;

		/*
		 * Disable pfil so that ipfw doesn't run twice; if the user
		 * really wants both then they can re-enable pfil_bridge
		 * and/or pfil_member.  Also allow non-IP packets as ipfw
		 * can filter by layer2 type.
		 */
		if (V_pfil_ipfw) {
			V_pfil_onlyip = 0;
			V_pfil_bridge = 0;
			V_pfil_member = 0;
		}
	}

	return (error);
}
SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
    "Layer2 filter with IPFW");

/*
 * bridge_clone_create:
 *
 *	Create a new bridge instance.
 */
static int
bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct bridge_softc *sc, *sc2;
	struct ifnet *bifp, *ifp;
	int fb, retry;
	unsigned long hostid;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}

	BRIDGE_LOCK_INIT(sc);
	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;

	/* Initialize our routing table. */
	bridge_rtable_init(sc);

	callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);

	LIST_INIT(&sc->sc_iflist);
	LIST_INIT(&sc->sc_spanlist);

	ifp->if_softc = sc;
	if_initname(ifp, bridge_name, unit);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bridge_ioctl;
	ifp->if_transmit = bridge_transmit;
	ifp->if_qflush = bridge_qflush;
	ifp->if_init = bridge_init;
	ifp->if_type = IFT_BRIDGE;

	/*
	 * Generate a locally administered Ethernet address.
	 *
	 * Since we are using random Ethernet addresses for the bridge, it is
	 * possible that we might have address collisions, so make sure that
	 * this hardware address isn't already in use on another bridge.
	 * The first try uses the hostid and falls back to arc4rand().
	 */
	fb = 0;
	getcredhostid(curthread->td_ucred, &hostid);
	do {
		if (fb || hostid == 0) {
			arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1);
			sc->sc_defaddr[0] &= ~1;	/* clear multicast bit */
			sc->sc_defaddr[0] |= 2;		/* set the LAA bit */
		} else {
			sc->sc_defaddr[0] = 0x2;
			sc->sc_defaddr[1] = (hostid >> 24) & 0xff;
			sc->sc_defaddr[2] = (hostid >> 16) & 0xff;
			sc->sc_defaddr[3] = (hostid >> 8) & 0xff;
			sc->sc_defaddr[4] = hostid & 0xff;
			sc->sc_defaddr[5] = ifp->if_dunit & 0xff;
		}

		fb = 1;
		retry = 0;
		BRIDGE_LIST_LOCK();
		LIST_FOREACH(sc2, &V_bridge_list, sc_list) {
			bifp = sc2->sc_ifp;
			if (memcmp(sc->sc_defaddr,
			    IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) {
				retry = 1;
				break;
			}
		}
		BRIDGE_LIST_UNLOCK();
	} while (retry == 1);

	bstp_attach(&sc->sc_stp, &bridge_ops);
	ether_ifattach(ifp, sc->sc_defaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_BRIDGE;

	BRIDGE_LIST_LOCK();
	LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
	BRIDGE_LIST_UNLOCK();

	return (0);
}

/*
 * bridge_clone_destroy:
 *
 *	Destroy a bridge instance.
 */
static void
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_iflist *bif;

	BRIDGE_LOCK(sc);

	bridge_stop(ifp, 1);
	ifp->if_flags &= ~IFF_UP;

	while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
		bridge_delete_member(sc, bif, 0);

	while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) {
		bridge_delete_span(sc, bif);
	}

	BRIDGE_UNLOCK(sc);

	callout_drain(&sc->sc_brcallout);

	BRIDGE_LIST_LOCK();
	LIST_REMOVE(sc, sc_list);
	BRIDGE_LIST_UNLOCK();

	bstp_detach(&sc->sc_stp);
	ether_ifdetach(ifp);
	if_free(ifp);

	/* Tear down the routing table. */
	bridge_rtable_fini(sc);

	BRIDGE_LOCK_DESTROY(sc);
	free(sc, M_DEVBUF);
}
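/*
 * Illustrative reading of the address scheme above (not additional code):
 * with a non-zero hostid the default MAC is laid out as
 *
 *	02:hh:hh:hh:hh:uu
 *
 * where 0x02 marks a locally administered unicast address, hh..hh are the
 * low 32 bits of the hostid and uu is the bridge unit number, so bridge0
 * and bridge1 on the same host differ only in the last octet.  With no
 * hostid, or on a collision with another bridge, the address is taken from
 * arc4rand() with the multicast bit cleared and the LAA bit set.
 */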
/*
 * bridge_ioctl:
 *
 *	Handle a control request from the operator.
 */
static int
bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct bridge_iflist *bif;
	struct thread *td = curthread;
	union {
		struct ifbreq ifbreq;
		struct ifbifconf ifbifconf;
		struct ifbareq ifbareq;
		struct ifbaconf ifbaconf;
		struct ifbrparam ifbrparam;
		struct ifbropreq ifbropreq;
	} args;
	struct ifdrv *ifd = (struct ifdrv *) data;
	const struct bridge_control *bc;
	int error = 0;

	switch (cmd) {

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		if (ifd->ifd_cmd >= bridge_control_table_size) {
			error = EINVAL;
			break;
		}
		bc = &bridge_control_table[ifd->ifd_cmd];

		if (cmd == SIOCGDRVSPEC &&
		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
			error = EINVAL;
			break;
		}
		else if (cmd == SIOCSDRVSPEC &&
		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
			error = EINVAL;
			break;
		}

		if (bc->bc_flags & BC_F_SUSER) {
			error = priv_check(td, PRIV_NET_BRIDGE);
			if (error)
				break;
		}

		if (ifd->ifd_len != bc->bc_argsize ||
		    ifd->ifd_len > sizeof(args)) {
			error = EINVAL;
			break;
		}

		bzero(&args, sizeof(args));
		if (bc->bc_flags & BC_F_COPYIN) {
			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
			if (error)
				break;
		}

		BRIDGE_LOCK(sc);
		error = (*bc->bc_func)(sc, &args);
		BRIDGE_UNLOCK(sc);
		if (error)
			break;

		if (bc->bc_flags & BC_F_COPYOUT)
			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);

		break;

	case SIOCSIFFLAGS:
		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			BRIDGE_LOCK(sc);
			bridge_stop(ifp, 1);
			BRIDGE_UNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			(*ifp->if_init)(sc);
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < 576) {
			error = EINVAL;
			break;
		}
		if (LIST_EMPTY(&sc->sc_iflist)) {
			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
			break;
		}
		BRIDGE_LOCK(sc);
		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
			if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) {
				log(LOG_NOTICE, "%s: invalid MTU: %u(%s)"
				    " != %d\n", sc->sc_ifp->if_xname,
				    bif->bif_ifp->if_mtu,
				    bif->bif_ifp->if_xname, ifr->ifr_mtu);
				error = EINVAL;
				break;
			}
		}
		if (!error)
			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
		BRIDGE_UNLOCK(sc);
		break;
	default:
		/*
		 * drop the lock as ether_ioctl() will call bridge_start() and
		 * cause the lock to be recursed.
		 */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
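/*
 * Usage sketch (hypothetical userland snippet, not part of the kernel):
 * the bridge_ioctl_*() handlers above are reached through
 * SIOCSDRVSPEC/SIOCGDRVSPEC with a struct ifdrv whose ifd_cmd indexes
 * bridge_control_table[], roughly
 *
 *	struct ifbreq req = { 0 };
 *	struct ifdrv ifd = { 0 };
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	ifd.ifd_cmd = BRDGADD;		(index 0: bridge_ioctl_add)
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(sock, SIOCSDRVSPEC, &ifd);
 *
 * bridge_ioctl() validates the index, the BC_F_SUSER privilege and that
 * ifd_len equals bc_argsize before dispatching; ifconfig(8) issues the same
 * requests using the BRDG* indices from net/if_bridgevar.h.
 */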
/*
 * bridge_mutecaps:
 *
 *	Clear or restore unwanted capabilities on the member interface.
 */
static void
bridge_mutecaps(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	int enabled, mask;

	/* Initial bitmask of capabilities to test */
	mask = BRIDGE_IFCAPS_MASK;

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		/* Every member must support it or it is disabled */
		mask &= bif->bif_savedcaps;
	}

	BRIDGE_XLOCK(sc);
	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		enabled = bif->bif_ifp->if_capenable;
		enabled &= ~BRIDGE_IFCAPS_STRIP;
		/* strip off mask bits and enable them again if allowed */
		enabled &= ~BRIDGE_IFCAPS_MASK;
		enabled |= mask;
		BRIDGE_UNLOCK(sc);
		bridge_set_ifcap(sc, bif, enabled);
		BRIDGE_LOCK(sc);
	}
	BRIDGE_XDROP(sc);
}

static void
bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
{
	struct ifnet *ifp = bif->bif_ifp;
	struct ifreq ifr;
	int error;

	BRIDGE_UNLOCK_ASSERT(sc);

	bzero(&ifr, sizeof(ifr));
	ifr.ifr_reqcap = set;

	if (ifp->if_capenable != set) {
		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
		if (error)
			if_printf(sc->sc_ifp,
			    "error setting interface capabilities on %s\n",
			    ifp->if_xname);
	}
}
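/*
 * Worked example (em0/re0 are placeholders): with BRIDGE_IFCAPS_MASK
 * covering TOE, TSO and transmit checksum offload, a bridge whose members
 * are em0 (TXCSUM|TSO) and re0 (TXCSUM only) ends up with mask = TXCSUM,
 * so TSO is turned off on em0.  LRO (BRIDGE_IFCAPS_STRIP) is unconditionally
 * disabled on members, since frames merged by the NIC are unsuitable for
 * forwarding.
 */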
/*
 * bridge_lookup_member:
 *
 *	Lookup a bridge member interface.
 */
static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
{
	struct bridge_iflist *bif;
	struct ifnet *ifp;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifp = bif->bif_ifp;
		if (strcmp(ifp->if_xname, name) == 0)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_lookup_member_if:
 *
 *	Lookup a bridge member interface by ifnet*.
 */
static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
{
	struct bridge_iflist *bif;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_ifp == member_ifp)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_delete_member:
 *
 *	Delete the specified member interface.
 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
    int gone)
{
	struct ifnet *ifs = bif->bif_ifp;
	struct ifnet *fif = NULL;

	BRIDGE_LOCK_ASSERT(sc);

	if (bif->bif_flags & IFBIF_STP)
		bstp_disable(&bif->bif_stp);

	ifs->if_bridge = NULL;
	BRIDGE_XLOCK(sc);
	LIST_REMOVE(bif, bif_next);
	BRIDGE_XDROP(sc);

	/*
	 * If removing the interface that gave the bridge its MAC address, set
	 * the MAC address of the bridge to the address of the next member, or
	 * to its default address if no members are left.
	 */
	if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
		if (LIST_EMPTY(&sc->sc_iflist)) {
			bcopy(sc->sc_defaddr,
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = NULL;
		} else {
			fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp;
			bcopy(IF_LLADDR(fif),
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = fif;
		}
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	}

	bridge_linkcheck(sc);
	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
	KASSERT(bif->bif_addrcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

	BRIDGE_UNLOCK(sc);
	if (!gone) {
		switch (ifs->if_type) {
		case IFT_ETHER:
		case IFT_L2VLAN:
			/*
			 * Take the interface out of promiscuous mode, but only
			 * if it was promiscuous in the first place. It might
			 * not be if we're in the bridge_ioctl_add() error path.
			 */
			if (ifs->if_flags & IFF_PROMISC)
				(void) ifpromisc(ifs, 0);
			break;

		case IFT_GIF:
			break;

		default:
#ifdef DIAGNOSTIC
			panic("bridge_delete_member: impossible");
#endif
			break;
		}
		/* re-enable any interface capabilities */
		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
	}
	bstp_destroy(&bif->bif_stp);	/* prepare to free */
	BRIDGE_LOCK(sc);
	free(bif, M_DEVBUF);
}

/*
 * bridge_delete_span:
 *
 *	Delete the specified span interface.
 */
static void
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
{
	BRIDGE_LOCK_ASSERT(sc);

	KASSERT(bif->bif_ifp->if_bridge == NULL,
	    ("%s: not a span interface", __func__));

	LIST_REMOVE(bif, bif_next);
	free(bif, M_DEVBUF);
}

static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs;
	int error = 0;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);
	if (ifs->if_ioctl == NULL)	/* must be supported */
		return (EINVAL);

	/* If it's in the span list, it can't be a member. */
	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			return (EBUSY);

	if (ifs->if_bridge == sc)
		return (EEXIST);

	if (ifs->if_bridge != NULL)
		return (EBUSY);

	switch (ifs->if_type) {
	case IFT_ETHER:
	case IFT_L2VLAN:
	case IFT_GIF:
		/* permitted interface types */
		break;
	default:
		return (EINVAL);
	}

#ifdef INET6
	/*
	 * Two valid inet6 addresses with link-local scope must not be
	 * on the parent interface and the member interfaces at the
	 * same time.  This restriction is needed to prevent violation
	 * of the link-local scope zone.  Attempts to add a member
	 * interface which has inet6 addresses when the parent also has
	 * inet6 trigger removal of all inet6 addresses on the member
	 * interface.
	 */

	/* Check if the parent interface has a link-local scope addr. */
	if (V_allow_llz_overlap == 0 &&
	    in6ifa_llaonifp(sc->sc_ifp) != NULL) {
		/*
		 * If any, remove all inet6 addresses from the member
		 * interfaces.
		 */
		BRIDGE_XLOCK(sc);
		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
			if (in6ifa_llaonifp(bif->bif_ifp)) {
				BRIDGE_UNLOCK(sc);
				in6_ifdetach(bif->bif_ifp);
				BRIDGE_LOCK(sc);
				if_printf(sc->sc_ifp,
				    "IPv6 addresses on %s have been removed "
				    "before adding it as a member to prevent "
				    "IPv6 address scope violation.\n",
				    bif->bif_ifp->if_xname);
			}
		}
		BRIDGE_XDROP(sc);
		if (in6ifa_llaonifp(ifs)) {
			BRIDGE_UNLOCK(sc);
			in6_ifdetach(ifs);
			BRIDGE_LOCK(sc);
			if_printf(sc->sc_ifp,
			    "IPv6 addresses on %s have been removed "
			    "before adding it as a member to prevent "
			    "IPv6 address scope violation.\n",
			    ifs->if_xname);
		}
	}
#endif
	/* Allow the first Ethernet member to define the MTU */
	if (LIST_EMPTY(&sc->sc_iflist))
		sc->sc_ifp->if_mtu = ifs->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
		if_printf(sc->sc_ifp, "invalid MTU: %u(%s) != %u\n",
		    ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu);
		return (EINVAL);
	}

	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (bif == NULL)
		return (ENOMEM);

	bif->bif_ifp = ifs;
	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
	bif->bif_savedcaps = ifs->if_capenable;

	/*
	 * Assign the interface's MAC address to the bridge if it's the first
	 * member and the MAC address of the bridge has not been changed from
	 * the default randomly generated one.
	 */
	if (V_bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) &&
	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) {
		bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
		sc->sc_ifaddr = ifs;
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	}

	ifs->if_bridge = sc;
	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
	/*
	 * XXX: XLOCK HERE!?!
	 *
	 * NOTE: insert_***HEAD*** should be safe for the traversals.
1199 */ 1200 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next); 1201 1202 /* Set interface capabilities to the intersection set of all members */ 1203 bridge_mutecaps(sc); 1204 bridge_linkcheck(sc); 1205 1206 /* Place the interface into promiscuous mode */ 1207 switch (ifs->if_type) { 1208 case IFT_ETHER: 1209 case IFT_L2VLAN: 1210 BRIDGE_UNLOCK(sc); 1211 error = ifpromisc(ifs, 1); 1212 BRIDGE_LOCK(sc); 1213 break; 1214 } 1215 1216 if (error) 1217 bridge_delete_member(sc, bif, 0); 1218 return (error); 1219 } 1220 1221 static int 1222 bridge_ioctl_del(struct bridge_softc *sc, void *arg) 1223 { 1224 struct ifbreq *req = arg; 1225 struct bridge_iflist *bif; 1226 1227 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1228 if (bif == NULL) 1229 return (ENOENT); 1230 1231 bridge_delete_member(sc, bif, 0); 1232 1233 return (0); 1234 } 1235 1236 static int 1237 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) 1238 { 1239 struct ifbreq *req = arg; 1240 struct bridge_iflist *bif; 1241 struct bstp_port *bp; 1242 1243 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1244 if (bif == NULL) 1245 return (ENOENT); 1246 1247 bp = &bif->bif_stp; 1248 req->ifbr_ifsflags = bif->bif_flags; 1249 req->ifbr_state = bp->bp_state; 1250 req->ifbr_priority = bp->bp_priority; 1251 req->ifbr_path_cost = bp->bp_path_cost; 1252 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1253 req->ifbr_proto = bp->bp_protover; 1254 req->ifbr_role = bp->bp_role; 1255 req->ifbr_stpflags = bp->bp_flags; 1256 req->ifbr_addrcnt = bif->bif_addrcnt; 1257 req->ifbr_addrmax = bif->bif_addrmax; 1258 req->ifbr_addrexceeded = bif->bif_addrexceeded; 1259 1260 /* Copy STP state options as flags */ 1261 if (bp->bp_operedge) 1262 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE; 1263 if (bp->bp_flags & BSTP_PORT_AUTOEDGE) 1264 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE; 1265 if (bp->bp_ptp_link) 1266 req->ifbr_ifsflags |= IFBIF_BSTP_PTP; 1267 if (bp->bp_flags & BSTP_PORT_AUTOPTP) 1268 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP; 1269 if (bp->bp_flags & BSTP_PORT_ADMEDGE) 1270 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE; 1271 if (bp->bp_flags & BSTP_PORT_ADMCOST) 1272 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST; 1273 return (0); 1274 } 1275 1276 static int 1277 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg) 1278 { 1279 struct ifbreq *req = arg; 1280 struct bridge_iflist *bif; 1281 struct bstp_port *bp; 1282 int error; 1283 1284 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1285 if (bif == NULL) 1286 return (ENOENT); 1287 bp = &bif->bif_stp; 1288 1289 if (req->ifbr_ifsflags & IFBIF_SPAN) 1290 /* SPAN is readonly */ 1291 return (EINVAL); 1292 1293 if (req->ifbr_ifsflags & IFBIF_STP) { 1294 if ((bif->bif_flags & IFBIF_STP) == 0) { 1295 error = bstp_enable(&bif->bif_stp); 1296 if (error) 1297 return (error); 1298 } 1299 } else { 1300 if ((bif->bif_flags & IFBIF_STP) != 0) 1301 bstp_disable(&bif->bif_stp); 1302 } 1303 1304 /* Pass on STP flags */ 1305 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0); 1306 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0); 1307 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0); 1308 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 
1 : 0); 1309 1310 /* Save the bits relating to the bridge */ 1311 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK; 1312 1313 return (0); 1314 } 1315 1316 static int 1317 bridge_ioctl_scache(struct bridge_softc *sc, void *arg) 1318 { 1319 struct ifbrparam *param = arg; 1320 1321 sc->sc_brtmax = param->ifbrp_csize; 1322 bridge_rttrim(sc); 1323 1324 return (0); 1325 } 1326 1327 static int 1328 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg) 1329 { 1330 struct ifbrparam *param = arg; 1331 1332 param->ifbrp_csize = sc->sc_brtmax; 1333 1334 return (0); 1335 } 1336 1337 static int 1338 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg) 1339 { 1340 struct ifbifconf *bifc = arg; 1341 struct bridge_iflist *bif; 1342 struct ifbreq breq; 1343 char *buf, *outbuf; 1344 int count, buflen, len, error = 0; 1345 1346 count = 0; 1347 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) 1348 count++; 1349 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1350 count++; 1351 1352 buflen = sizeof(breq) * count; 1353 if (bifc->ifbic_len == 0) { 1354 bifc->ifbic_len = buflen; 1355 return (0); 1356 } 1357 BRIDGE_UNLOCK(sc); 1358 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1359 BRIDGE_LOCK(sc); 1360 1361 count = 0; 1362 buf = outbuf; 1363 len = min(bifc->ifbic_len, buflen); 1364 bzero(&breq, sizeof(breq)); 1365 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1366 if (len < sizeof(breq)) 1367 break; 1368 1369 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1370 sizeof(breq.ifbr_ifsname)); 1371 /* Fill in the ifbreq structure */ 1372 error = bridge_ioctl_gifflags(sc, &breq); 1373 if (error) 1374 break; 1375 memcpy(buf, &breq, sizeof(breq)); 1376 count++; 1377 buf += sizeof(breq); 1378 len -= sizeof(breq); 1379 } 1380 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 1381 if (len < sizeof(breq)) 1382 break; 1383 1384 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1385 sizeof(breq.ifbr_ifsname)); 1386 breq.ifbr_ifsflags = bif->bif_flags; 1387 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1388 memcpy(buf, &breq, sizeof(breq)); 1389 count++; 1390 buf += sizeof(breq); 1391 len -= sizeof(breq); 1392 } 1393 1394 BRIDGE_UNLOCK(sc); 1395 bifc->ifbic_len = sizeof(breq) * count; 1396 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); 1397 BRIDGE_LOCK(sc); 1398 free(outbuf, M_TEMP); 1399 return (error); 1400 } 1401 1402 static int 1403 bridge_ioctl_rts(struct bridge_softc *sc, void *arg) 1404 { 1405 struct ifbaconf *bac = arg; 1406 struct bridge_rtnode *brt; 1407 struct ifbareq bareq; 1408 char *buf, *outbuf; 1409 int count, buflen, len, error = 0; 1410 1411 if (bac->ifbac_len == 0) 1412 return (0); 1413 1414 count = 0; 1415 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) 1416 count++; 1417 buflen = sizeof(bareq) * count; 1418 1419 BRIDGE_UNLOCK(sc); 1420 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1421 BRIDGE_LOCK(sc); 1422 1423 count = 0; 1424 buf = outbuf; 1425 len = min(bac->ifbac_len, buflen); 1426 bzero(&bareq, sizeof(bareq)); 1427 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 1428 if (len < sizeof(bareq)) 1429 goto out; 1430 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname, 1431 sizeof(bareq.ifba_ifsname)); 1432 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); 1433 bareq.ifba_vlan = brt->brt_vlan; 1434 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 1435 time_uptime < brt->brt_expire) 1436 bareq.ifba_expire = brt->brt_expire - time_uptime; 1437 else 1438 bareq.ifba_expire = 0; 1439 bareq.ifba_flags = brt->brt_flags; 1440 1441 memcpy(buf, &bareq, sizeof(bareq)); 1442 
count++; 1443 buf += sizeof(bareq); 1444 len -= sizeof(bareq); 1445 } 1446 out: 1447 BRIDGE_UNLOCK(sc); 1448 bac->ifbac_len = sizeof(bareq) * count; 1449 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); 1450 BRIDGE_LOCK(sc); 1451 free(outbuf, M_TEMP); 1452 return (error); 1453 } 1454 1455 static int 1456 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg) 1457 { 1458 struct ifbareq *req = arg; 1459 struct bridge_iflist *bif; 1460 int error; 1461 1462 bif = bridge_lookup_member(sc, req->ifba_ifsname); 1463 if (bif == NULL) 1464 return (ENOENT); 1465 1466 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1, 1467 req->ifba_flags); 1468 1469 return (error); 1470 } 1471 1472 static int 1473 bridge_ioctl_sto(struct bridge_softc *sc, void *arg) 1474 { 1475 struct ifbrparam *param = arg; 1476 1477 sc->sc_brttimeout = param->ifbrp_ctime; 1478 return (0); 1479 } 1480 1481 static int 1482 bridge_ioctl_gto(struct bridge_softc *sc, void *arg) 1483 { 1484 struct ifbrparam *param = arg; 1485 1486 param->ifbrp_ctime = sc->sc_brttimeout; 1487 return (0); 1488 } 1489 1490 static int 1491 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg) 1492 { 1493 struct ifbareq *req = arg; 1494 1495 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan)); 1496 } 1497 1498 static int 1499 bridge_ioctl_flush(struct bridge_softc *sc, void *arg) 1500 { 1501 struct ifbreq *req = arg; 1502 1503 bridge_rtflush(sc, req->ifbr_ifsflags); 1504 return (0); 1505 } 1506 1507 static int 1508 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg) 1509 { 1510 struct ifbrparam *param = arg; 1511 struct bstp_state *bs = &sc->sc_stp; 1512 1513 param->ifbrp_prio = bs->bs_bridge_priority; 1514 return (0); 1515 } 1516 1517 static int 1518 bridge_ioctl_spri(struct bridge_softc *sc, void *arg) 1519 { 1520 struct ifbrparam *param = arg; 1521 1522 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio)); 1523 } 1524 1525 static int 1526 bridge_ioctl_ght(struct bridge_softc *sc, void *arg) 1527 { 1528 struct ifbrparam *param = arg; 1529 struct bstp_state *bs = &sc->sc_stp; 1530 1531 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8; 1532 return (0); 1533 } 1534 1535 static int 1536 bridge_ioctl_sht(struct bridge_softc *sc, void *arg) 1537 { 1538 struct ifbrparam *param = arg; 1539 1540 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime)); 1541 } 1542 1543 static int 1544 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg) 1545 { 1546 struct ifbrparam *param = arg; 1547 struct bstp_state *bs = &sc->sc_stp; 1548 1549 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8; 1550 return (0); 1551 } 1552 1553 static int 1554 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg) 1555 { 1556 struct ifbrparam *param = arg; 1557 1558 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay)); 1559 } 1560 1561 static int 1562 bridge_ioctl_gma(struct bridge_softc *sc, void *arg) 1563 { 1564 struct ifbrparam *param = arg; 1565 struct bstp_state *bs = &sc->sc_stp; 1566 1567 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8; 1568 return (0); 1569 } 1570 1571 static int 1572 bridge_ioctl_sma(struct bridge_softc *sc, void *arg) 1573 { 1574 struct ifbrparam *param = arg; 1575 1576 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage)); 1577 } 1578 1579 static int 1580 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg) 1581 { 1582 struct ifbreq *req = arg; 1583 struct bridge_iflist *bif; 1584 1585 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1586 if (bif == NULL) 1587 return (ENOENT); 1588 1589 return 
(bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority)); 1590 } 1591 1592 static int 1593 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg) 1594 { 1595 struct ifbreq *req = arg; 1596 struct bridge_iflist *bif; 1597 1598 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1599 if (bif == NULL) 1600 return (ENOENT); 1601 1602 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost)); 1603 } 1604 1605 static int 1606 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg) 1607 { 1608 struct ifbreq *req = arg; 1609 struct bridge_iflist *bif; 1610 1611 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1612 if (bif == NULL) 1613 return (ENOENT); 1614 1615 bif->bif_addrmax = req->ifbr_addrmax; 1616 return (0); 1617 } 1618 1619 static int 1620 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg) 1621 { 1622 struct ifbreq *req = arg; 1623 struct bridge_iflist *bif = NULL; 1624 struct ifnet *ifs; 1625 1626 ifs = ifunit(req->ifbr_ifsname); 1627 if (ifs == NULL) 1628 return (ENOENT); 1629 1630 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1631 if (ifs == bif->bif_ifp) 1632 return (EBUSY); 1633 1634 if (ifs->if_bridge != NULL) 1635 return (EBUSY); 1636 1637 switch (ifs->if_type) { 1638 case IFT_ETHER: 1639 case IFT_GIF: 1640 case IFT_L2VLAN: 1641 break; 1642 default: 1643 return (EINVAL); 1644 } 1645 1646 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1647 if (bif == NULL) 1648 return (ENOMEM); 1649 1650 bif->bif_ifp = ifs; 1651 bif->bif_flags = IFBIF_SPAN; 1652 1653 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next); 1654 1655 return (0); 1656 } 1657 1658 static int 1659 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg) 1660 { 1661 struct ifbreq *req = arg; 1662 struct bridge_iflist *bif; 1663 struct ifnet *ifs; 1664 1665 ifs = ifunit(req->ifbr_ifsname); 1666 if (ifs == NULL) 1667 return (ENOENT); 1668 1669 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1670 if (ifs == bif->bif_ifp) 1671 break; 1672 1673 if (bif == NULL) 1674 return (ENOENT); 1675 1676 bridge_delete_span(sc, bif); 1677 1678 return (0); 1679 } 1680 1681 static int 1682 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg) 1683 { 1684 struct ifbropreq *req = arg; 1685 struct bstp_state *bs = &sc->sc_stp; 1686 struct bstp_port *root_port; 1687 1688 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; 1689 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; 1690 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; 1691 1692 root_port = bs->bs_root_port; 1693 if (root_port == NULL) 1694 req->ifbop_root_port = 0; 1695 else 1696 req->ifbop_root_port = root_port->bp_ifp->if_index; 1697 1698 req->ifbop_holdcount = bs->bs_txholdcount; 1699 req->ifbop_priority = bs->bs_bridge_priority; 1700 req->ifbop_protocol = bs->bs_protover; 1701 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; 1702 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; 1703 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; 1704 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; 1705 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; 1706 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; 1707 1708 return (0); 1709 } 1710 1711 static int 1712 bridge_ioctl_grte(struct bridge_softc *sc, void *arg) 1713 { 1714 struct ifbrparam *param = arg; 1715 1716 param->ifbrp_cexceeded = sc->sc_brtexceeded; 1717 return (0); 1718 } 1719 1720 static int 1721 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg) 1722 { 1723 struct ifbpstpconf *bifstp = arg; 1724 struct bridge_iflist *bif; 1725 struct bstp_port *bp; 1726 
struct ifbpstpreq bpreq; 1727 char *buf, *outbuf; 1728 int count, buflen, len, error = 0; 1729 1730 count = 0; 1731 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1732 if ((bif->bif_flags & IFBIF_STP) != 0) 1733 count++; 1734 } 1735 1736 buflen = sizeof(bpreq) * count; 1737 if (bifstp->ifbpstp_len == 0) { 1738 bifstp->ifbpstp_len = buflen; 1739 return (0); 1740 } 1741 1742 BRIDGE_UNLOCK(sc); 1743 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1744 BRIDGE_LOCK(sc); 1745 1746 count = 0; 1747 buf = outbuf; 1748 len = min(bifstp->ifbpstp_len, buflen); 1749 bzero(&bpreq, sizeof(bpreq)); 1750 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1751 if (len < sizeof(bpreq)) 1752 break; 1753 1754 if ((bif->bif_flags & IFBIF_STP) == 0) 1755 continue; 1756 1757 bp = &bif->bif_stp; 1758 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; 1759 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; 1760 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; 1761 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; 1762 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; 1763 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; 1764 1765 memcpy(buf, &bpreq, sizeof(bpreq)); 1766 count++; 1767 buf += sizeof(bpreq); 1768 len -= sizeof(bpreq); 1769 } 1770 1771 BRIDGE_UNLOCK(sc); 1772 bifstp->ifbpstp_len = sizeof(bpreq) * count; 1773 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); 1774 BRIDGE_LOCK(sc); 1775 free(outbuf, M_TEMP); 1776 return (error); 1777 } 1778 1779 static int 1780 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg) 1781 { 1782 struct ifbrparam *param = arg; 1783 1784 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto)); 1785 } 1786 1787 static int 1788 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg) 1789 { 1790 struct ifbrparam *param = arg; 1791 1792 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc)); 1793 } 1794 1795 /* 1796 * bridge_ifdetach: 1797 * 1798 * Detach an interface from a bridge. Called when a member 1799 * interface is detaching. 1800 */ 1801 static void 1802 bridge_ifdetach(void *arg __unused, struct ifnet *ifp) 1803 { 1804 struct bridge_softc *sc = ifp->if_bridge; 1805 struct bridge_iflist *bif; 1806 1807 if (ifp->if_flags & IFF_RENAMING) 1808 return; 1809 if (V_bridge_cloner == NULL) { 1810 /* 1811 * This detach handler can be called after 1812 * vnet_bridge_uninit(). Just return in that case. 1813 */ 1814 return; 1815 } 1816 /* Check if the interface is a bridge member */ 1817 if (sc != NULL) { 1818 BRIDGE_LOCK(sc); 1819 1820 bif = bridge_lookup_member_if(sc, ifp); 1821 if (bif != NULL) 1822 bridge_delete_member(sc, bif, 1); 1823 1824 BRIDGE_UNLOCK(sc); 1825 return; 1826 } 1827 1828 /* Check if the interface is a span port */ 1829 BRIDGE_LIST_LOCK(); 1830 LIST_FOREACH(sc, &V_bridge_list, sc_list) { 1831 BRIDGE_LOCK(sc); 1832 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1833 if (ifp == bif->bif_ifp) { 1834 bridge_delete_span(sc, bif); 1835 break; 1836 } 1837 1838 BRIDGE_UNLOCK(sc); 1839 } 1840 BRIDGE_LIST_UNLOCK(); 1841 } 1842 1843 /* 1844 * bridge_init: 1845 * 1846 * Initialize a bridge interface. 
 */
static void
bridge_init(void *xsc)
{
	struct bridge_softc *sc = (struct bridge_softc *)xsc;
	struct ifnet *ifp = sc->sc_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	BRIDGE_LOCK(sc);
	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
	    bridge_timer, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */

	BRIDGE_UNLOCK(sc);
}

/*
 * bridge_stop:
 *
 *	Stop the bridge interface.
 */
static void
bridge_stop(struct ifnet *ifp, int disable)
{
	struct bridge_softc *sc = ifp->if_softc;

	BRIDGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	callout_stop(&sc->sc_brcallout);
	bstp_stop(&sc->sc_stp);

	bridge_rtflush(sc, IFBF_FLUSHDYN);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
}

/*
 * bridge_enqueue:
 *
 *	Enqueue a packet on a bridge member interface.
 */
static int
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
{
	int len, err = 0;
	short mflags;
	struct mbuf *m0;

	/* We may be sending a fragment so traverse the mbuf */
	for (; m; m = m0) {
		m0 = m->m_nextpkt;
		m->m_nextpkt = NULL;
		len = m->m_pkthdr.len;
		mflags = m->m_flags;

		/*
		 * If the underlying interface cannot do VLAN tag insertion
		 * itself then prepend a VLAN header to the frame.
		 */
		if ((m->m_flags & M_VLANTAG) &&
		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
			if (m == NULL) {
				if_printf(dst_ifp,
				    "unable to prepend VLAN header\n");
				if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
				continue;
			}
			m->m_flags &= ~M_VLANTAG;
		}

		M_ASSERTPKTHDR(m); /* We shouldn't transmit mbuf without pkthdr */
		if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
			m_freem(m0);
			if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
			break;
		}

		if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
		if (mflags & M_MCAST)
			if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
	}

	return (err);
}
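/*
 * Illustrative note: ether_vlanencap() above turns the out-of-band tag in
 * m_pkthdr.ether_vtag into an in-frame 802.1Q header when the outgoing
 * member lacks IFCAP_VLAN_HWTAGGING, so what goes on the wire is roughly
 *
 *	dst : src : 0x8100 : PCP/VID : original ethertype : payload
 *
 * whereas with hardware tagging the tag stays in the mbuf packet header and
 * the NIC inserts it on transmit.
 */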
We must 1989 * enqueue or free the mbuf before returning. 1990 */ 1991 static int 1992 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, 1993 struct rtentry *rt) 1994 { 1995 struct ether_header *eh; 1996 struct ifnet *dst_if; 1997 struct bridge_softc *sc; 1998 uint16_t vlan; 1999 2000 if (m->m_len < ETHER_HDR_LEN) { 2001 m = m_pullup(m, ETHER_HDR_LEN); 2002 if (m == NULL) 2003 return (0); 2004 } 2005 2006 eh = mtod(m, struct ether_header *); 2007 sc = ifp->if_bridge; 2008 vlan = VLANTAGOF(m); 2009 2010 BRIDGE_LOCK(sc); 2011 2012 /* 2013 * If bridge is down, but the original output interface is up, 2014 * go ahead and send out that interface. Otherwise, the packet 2015 * is dropped below. 2016 */ 2017 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2018 dst_if = ifp; 2019 goto sendunicast; 2020 } 2021 2022 /* 2023 * If the packet is a multicast, or we don't know a better way to 2024 * get there, send to all interfaces. 2025 */ 2026 if (ETHER_IS_MULTICAST(eh->ether_dhost)) 2027 dst_if = NULL; 2028 else 2029 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan); 2030 if (dst_if == NULL) { 2031 struct bridge_iflist *bif; 2032 struct mbuf *mc; 2033 int error = 0, used = 0; 2034 2035 bridge_span(sc, m); 2036 2037 BRIDGE_LOCK2REF(sc, error); 2038 if (error) { 2039 m_freem(m); 2040 return (0); 2041 } 2042 2043 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 2044 dst_if = bif->bif_ifp; 2045 2046 if (dst_if->if_type == IFT_GIF) 2047 continue; 2048 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2049 continue; 2050 2051 /* 2052 * If this is not the original output interface, 2053 * and the interface is participating in spanning 2054 * tree, make sure the port is in a state that 2055 * allows forwarding. 2056 */ 2057 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) && 2058 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2059 continue; 2060 2061 if (LIST_NEXT(bif, bif_next) == NULL) { 2062 used = 1; 2063 mc = m; 2064 } else { 2065 mc = m_copypacket(m, M_NOWAIT); 2066 if (mc == NULL) { 2067 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); 2068 continue; 2069 } 2070 } 2071 2072 bridge_enqueue(sc, dst_if, mc); 2073 } 2074 if (used == 0) 2075 m_freem(m); 2076 BRIDGE_UNREF(sc); 2077 return (0); 2078 } 2079 2080 sendunicast: 2081 /* 2082 * XXX Spanning tree consideration here? 2083 */ 2084 2085 bridge_span(sc, m); 2086 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2087 m_freem(m); 2088 BRIDGE_UNLOCK(sc); 2089 return (0); 2090 } 2091 2092 BRIDGE_UNLOCK(sc); 2093 bridge_enqueue(sc, dst_if, m); 2094 return (0); 2095 } 2096 2097 /* 2098 * bridge_transmit: 2099 * 2100 * Do output on a bridge. 2101 * 2102 */ 2103 static int 2104 bridge_transmit(struct ifnet *ifp, struct mbuf *m) 2105 { 2106 struct bridge_softc *sc; 2107 struct ether_header *eh; 2108 struct ifnet *dst_if; 2109 int error = 0; 2110 2111 sc = ifp->if_softc; 2112 2113 ETHER_BPF_MTAP(ifp, m); 2114 2115 eh = mtod(m, struct ether_header *); 2116 2117 BRIDGE_LOCK(sc); 2118 if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) && 2119 (dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1)) != NULL) { 2120 BRIDGE_UNLOCK(sc); 2121 error = bridge_enqueue(sc, dst_if, m); 2122 } else 2123 bridge_broadcast(sc, ifp, m, 0); 2124 2125 return (error); 2126 } 2127 2128 /* 2129 * The ifp->if_qflush entry point for if_bridge(4) is no-op. 2130 */ 2131 static void 2132 bridge_qflush(struct ifnet *ifp __unused) 2133 { 2134 } 2135 2136 /* 2137 * bridge_forward: 2138 * 2139 * The forwarding function of the bridge. 
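 *
 *	Informal sketch of the decisions taken below (not a normative
 *	summary):
 *
 *	  - receiving port in the STP discarding state          -> drop
 *	  - learning enabled: record the source address via
 *	    bridge_rtupdate(); over the per-port address limit   -> drop
 *	  - receiving port still in the STP learning state       -> drop
 *	  - reserved 802.1D group address (01:80:C2:00:00:00-0F) -> drop
 *	  - known unicast destination on the receiving port      -> drop
 *	  - known unicast destination on another port            -> pfil(9),
 *	    then bridge_enqueue()
 *	  - broadcast, multicast or unknown unicast              -> pfil(9),
 *	    then bridge_broadcast()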
2140 * 2141 * NOTE: Releases the lock on return. 2142 */ 2143 static void 2144 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, 2145 struct mbuf *m) 2146 { 2147 struct bridge_iflist *dbif; 2148 struct ifnet *src_if, *dst_if, *ifp; 2149 struct ether_header *eh; 2150 uint16_t vlan; 2151 uint8_t *dst; 2152 int error; 2153 2154 src_if = m->m_pkthdr.rcvif; 2155 ifp = sc->sc_ifp; 2156 2157 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 2158 if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); 2159 vlan = VLANTAGOF(m); 2160 2161 if ((sbif->bif_flags & IFBIF_STP) && 2162 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2163 goto drop; 2164 2165 eh = mtod(m, struct ether_header *); 2166 dst = eh->ether_dhost; 2167 2168 /* If the interface is learning, record the address. */ 2169 if (sbif->bif_flags & IFBIF_LEARNING) { 2170 error = bridge_rtupdate(sc, eh->ether_shost, vlan, 2171 sbif, 0, IFBAF_DYNAMIC); 2172 /* 2173 * If the interface has addresses limits then deny any source 2174 * that is not in the cache. 2175 */ 2176 if (error && sbif->bif_addrmax) 2177 goto drop; 2178 } 2179 2180 if ((sbif->bif_flags & IFBIF_STP) != 0 && 2181 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) 2182 goto drop; 2183 2184 /* 2185 * At this point, the port either doesn't participate 2186 * in spanning tree or it is in the forwarding state. 2187 */ 2188 2189 /* 2190 * If the packet is unicast, destined for someone on 2191 * "this" side of the bridge, drop it. 2192 */ 2193 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 2194 dst_if = bridge_rtlookup(sc, dst, vlan); 2195 if (src_if == dst_if) 2196 goto drop; 2197 } else { 2198 /* 2199 * Check if its a reserved multicast address, any address 2200 * listed in 802.1D section 7.12.6 may not be forwarded by the 2201 * bridge. 2202 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F 2203 */ 2204 if (dst[0] == 0x01 && dst[1] == 0x80 && 2205 dst[2] == 0xc2 && dst[3] == 0x00 && 2206 dst[4] == 0x00 && dst[5] <= 0x0f) 2207 goto drop; 2208 2209 /* ...forward it to all interfaces. */ 2210 if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1); 2211 dst_if = NULL; 2212 } 2213 2214 /* 2215 * If we have a destination interface which is a member of our bridge, 2216 * OR this is a unicast packet, push it through the bpf(4) machinery. 2217 * For broadcast or multicast packets, don't bother because it will 2218 * be reinjected into ether_input. We do this before we pass the packets 2219 * through the pfil(9) framework, as it is possible that pfil(9) will 2220 * drop the packet, or possibly modify it, making it difficult to debug 2221 * firewall issues on the bridge. 2222 */ 2223 if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) 2224 ETHER_BPF_MTAP(ifp, m); 2225 2226 /* run the packet filter */ 2227 if (PFIL_HOOKED(&V_inet_pfil_hook) 2228 #ifdef INET6 2229 || PFIL_HOOKED(&V_inet6_pfil_hook) 2230 #endif 2231 ) { 2232 BRIDGE_UNLOCK(sc); 2233 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0) 2234 return; 2235 if (m == NULL) 2236 return; 2237 BRIDGE_LOCK(sc); 2238 } 2239 2240 if (dst_if == NULL) { 2241 bridge_broadcast(sc, src_if, m, 1); 2242 return; 2243 } 2244 2245 /* 2246 * At this point, we're dealing with a unicast frame 2247 * going to a different interface. 2248 */ 2249 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2250 goto drop; 2251 2252 dbif = bridge_lookup_member_if(sc, dst_if); 2253 if (dbif == NULL) 2254 /* Not a member of the bridge (anymore?) 
*/ 2255 goto drop; 2256 2257 /* Private segments can not talk to each other */ 2258 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE) 2259 goto drop; 2260 2261 if ((dbif->bif_flags & IFBIF_STP) && 2262 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2263 goto drop; 2264 2265 BRIDGE_UNLOCK(sc); 2266 2267 if (PFIL_HOOKED(&V_inet_pfil_hook) 2268 #ifdef INET6 2269 || PFIL_HOOKED(&V_inet6_pfil_hook) 2270 #endif 2271 ) { 2272 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0) 2273 return; 2274 if (m == NULL) 2275 return; 2276 } 2277 2278 bridge_enqueue(sc, dst_if, m); 2279 return; 2280 2281 drop: 2282 BRIDGE_UNLOCK(sc); 2283 m_freem(m); 2284 } 2285 2286 /* 2287 * bridge_input: 2288 * 2289 * Receive input from a member interface. Queue the packet for 2290 * bridging if it is not for us. 2291 */ 2292 static struct mbuf * 2293 bridge_input(struct ifnet *ifp, struct mbuf *m) 2294 { 2295 struct bridge_softc *sc = ifp->if_bridge; 2296 struct bridge_iflist *bif, *bif2; 2297 struct ifnet *bifp; 2298 struct ether_header *eh; 2299 struct mbuf *mc, *mc2; 2300 uint16_t vlan; 2301 int error; 2302 2303 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2304 return (m); 2305 2306 bifp = sc->sc_ifp; 2307 vlan = VLANTAGOF(m); 2308 2309 /* 2310 * Implement support for bridge monitoring. If this flag has been 2311 * set on this interface, discard the packet once we push it through 2312 * the bpf(4) machinery, but before we do, increment the byte and 2313 * packet counters associated with this interface. 2314 */ 2315 if ((bifp->if_flags & IFF_MONITOR) != 0) { 2316 m->m_pkthdr.rcvif = bifp; 2317 ETHER_BPF_MTAP(bifp, m); 2318 if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1); 2319 if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len); 2320 m_freem(m); 2321 return (NULL); 2322 } 2323 BRIDGE_LOCK(sc); 2324 bif = bridge_lookup_member_if(sc, ifp); 2325 if (bif == NULL) { 2326 BRIDGE_UNLOCK(sc); 2327 return (m); 2328 } 2329 2330 eh = mtod(m, struct ether_header *); 2331 2332 bridge_span(sc, m); 2333 2334 if (m->m_flags & (M_BCAST|M_MCAST)) { 2335 /* Tap off 802.1D packets; they do not get forwarded. */ 2336 if (memcmp(eh->ether_dhost, bstp_etheraddr, 2337 ETHER_ADDR_LEN) == 0) { 2338 bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */ 2339 BRIDGE_UNLOCK(sc); 2340 return (NULL); 2341 } 2342 2343 if ((bif->bif_flags & IFBIF_STP) && 2344 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2345 BRIDGE_UNLOCK(sc); 2346 return (m); 2347 } 2348 2349 /* 2350 * Make a deep copy of the packet and enqueue the copy 2351 * for bridge processing; return the original packet for 2352 * local processing. 2353 */ 2354 mc = m_dup(m, M_NOWAIT); 2355 if (mc == NULL) { 2356 BRIDGE_UNLOCK(sc); 2357 return (m); 2358 } 2359 2360 /* Perform the bridge forwarding function with the copy. */ 2361 bridge_forward(sc, bif, mc); 2362 2363 /* 2364 * Reinject the mbuf as arriving on the bridge so we have a 2365 * chance at claiming multicast packets. We can not loop back 2366 * here from ether_input as a bridge is never a member of a 2367 * bridge. 2368 */ 2369 KASSERT(bifp->if_bridge == NULL, 2370 ("loop created in bridge_input")); 2371 mc2 = m_dup(m, M_NOWAIT); 2372 if (mc2 != NULL) { 2373 /* Keep the layer3 header aligned */ 2374 int i = min(mc2->m_pkthdr.len, max_protohdr); 2375 mc2 = m_copyup(mc2, i, ETHER_ALIGN); 2376 } 2377 if (mc2 != NULL) { 2378 mc2->m_pkthdr.rcvif = bifp; 2379 (*bifp->if_input)(bifp, mc2); 2380 } 2381 2382 /* Return the original packet for local processing. 
*/ 2383 return (m); 2384 } 2385 2386 if ((bif->bif_flags & IFBIF_STP) && 2387 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2388 BRIDGE_UNLOCK(sc); 2389 return (m); 2390 } 2391 2392 #if (defined(INET) || defined(INET6)) 2393 # define OR_CARP_CHECK_WE_ARE_DST(iface) \ 2394 || ((iface)->if_carp \ 2395 && (*carp_forus_p)((iface), eh->ether_dhost)) 2396 # define OR_CARP_CHECK_WE_ARE_SRC(iface) \ 2397 || ((iface)->if_carp \ 2398 && (*carp_forus_p)((iface), eh->ether_shost)) 2399 #else 2400 # define OR_CARP_CHECK_WE_ARE_DST(iface) 2401 # define OR_CARP_CHECK_WE_ARE_SRC(iface) 2402 #endif 2403 2404 #ifdef INET6 2405 # define OR_PFIL_HOOKED_INET6 \ 2406 || PFIL_HOOKED(&V_inet6_pfil_hook) 2407 #else 2408 # define OR_PFIL_HOOKED_INET6 2409 #endif 2410 2411 #define GRAB_OUR_PACKETS(iface) \ 2412 if ((iface)->if_type == IFT_GIF) \ 2413 continue; \ 2414 /* It is destined for us. */ \ 2415 if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \ 2416 OR_CARP_CHECK_WE_ARE_DST((iface)) \ 2417 ) { \ 2418 if ((iface)->if_type == IFT_BRIDGE) { \ 2419 ETHER_BPF_MTAP(iface, m); \ 2420 if_inc_counter(iface, IFCOUNTER_IPACKETS, 1); \ 2421 if_inc_counter(iface, IFCOUNTER_IBYTES, m->m_pkthdr.len); \ 2422 /* Filter on the physical interface. */ \ 2423 if (V_pfil_local_phys && \ 2424 (PFIL_HOOKED(&V_inet_pfil_hook) \ 2425 OR_PFIL_HOOKED_INET6)) { \ 2426 if (bridge_pfil(&m, NULL, ifp, \ 2427 PFIL_IN) != 0 || m == NULL) { \ 2428 BRIDGE_UNLOCK(sc); \ 2429 return (NULL); \ 2430 } \ 2431 eh = mtod(m, struct ether_header *); \ 2432 } \ 2433 } \ 2434 if (bif->bif_flags & IFBIF_LEARNING) { \ 2435 error = bridge_rtupdate(sc, eh->ether_shost, \ 2436 vlan, bif, 0, IFBAF_DYNAMIC); \ 2437 if (error && bif->bif_addrmax) { \ 2438 BRIDGE_UNLOCK(sc); \ 2439 m_freem(m); \ 2440 return (NULL); \ 2441 } \ 2442 } \ 2443 m->m_pkthdr.rcvif = iface; \ 2444 BRIDGE_UNLOCK(sc); \ 2445 return (m); \ 2446 } \ 2447 \ 2448 /* We just received a packet that we sent out. */ \ 2449 if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \ 2450 OR_CARP_CHECK_WE_ARE_SRC((iface)) \ 2451 ) { \ 2452 BRIDGE_UNLOCK(sc); \ 2453 m_freem(m); \ 2454 return (NULL); \ 2455 } 2456 2457 /* 2458 * Unicast. Make sure it's not for the bridge. 2459 */ 2460 do { GRAB_OUR_PACKETS(bifp) } while (0); 2461 2462 /* 2463 * Give a chance for ifp at first priority. This will help when the 2464 * packet comes through the interface like VLAN's with the same MACs 2465 * on several interfaces from the same bridge. This also will save 2466 * some CPU cycles in case the destination interface and the input 2467 * interface (eq ifp) are the same. 2468 */ 2469 do { GRAB_OUR_PACKETS(ifp) } while (0); 2470 2471 /* Now check the all bridge members. */ 2472 LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) { 2473 GRAB_OUR_PACKETS(bif2->bif_ifp) 2474 } 2475 2476 #undef OR_CARP_CHECK_WE_ARE_DST 2477 #undef OR_CARP_CHECK_WE_ARE_SRC 2478 #undef OR_PFIL_HOOKED_INET6 2479 #undef GRAB_OUR_PACKETS 2480 2481 /* Perform the bridge forwarding function. */ 2482 bridge_forward(sc, bif, m); 2483 2484 return (NULL); 2485 } 2486 2487 /* 2488 * bridge_broadcast: 2489 * 2490 * Send a frame to all interfaces that are members of 2491 * the bridge, except for the one on which the packet 2492 * arrived. 2493 * 2494 * NOTE: Releases the lock on return. 
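 *
 *	Copy strategy (an informal note on the loop below): every member
 *	the frame is forwarded to receives its own m_dup() copy, except
 *	that the interface occupying the last slot in the member list is
 *	handed the original mbuf and "used" is set; if no member ends up
 *	taking the original, it is freed once the loop finishes.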
2495 */ 2496 static void 2497 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, 2498 struct mbuf *m, int runfilt) 2499 { 2500 struct bridge_iflist *dbif, *sbif; 2501 struct mbuf *mc; 2502 struct ifnet *dst_if; 2503 int error = 0, used = 0, i; 2504 2505 sbif = bridge_lookup_member_if(sc, src_if); 2506 2507 BRIDGE_LOCK2REF(sc, error); 2508 if (error) { 2509 m_freem(m); 2510 return; 2511 } 2512 2513 /* Filter on the bridge interface before broadcasting */ 2514 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2515 #ifdef INET6 2516 || PFIL_HOOKED(&V_inet6_pfil_hook) 2517 #endif 2518 )) { 2519 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) 2520 goto out; 2521 if (m == NULL) 2522 goto out; 2523 } 2524 2525 LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) { 2526 dst_if = dbif->bif_ifp; 2527 if (dst_if == src_if) 2528 continue; 2529 2530 /* Private segments can not talk to each other */ 2531 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)) 2532 continue; 2533 2534 if ((dbif->bif_flags & IFBIF_STP) && 2535 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2536 continue; 2537 2538 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 && 2539 (m->m_flags & (M_BCAST|M_MCAST)) == 0) 2540 continue; 2541 2542 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2543 continue; 2544 2545 if (LIST_NEXT(dbif, bif_next) == NULL) { 2546 mc = m; 2547 used = 1; 2548 } else { 2549 mc = m_dup(m, M_NOWAIT); 2550 if (mc == NULL) { 2551 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); 2552 continue; 2553 } 2554 } 2555 2556 /* 2557 * Filter on the output interface. Pass a NULL bridge interface 2558 * pointer so we do not redundantly filter on the bridge for 2559 * each interface we broadcast on. 2560 */ 2561 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2562 #ifdef INET6 2563 || PFIL_HOOKED(&V_inet6_pfil_hook) 2564 #endif 2565 )) { 2566 if (used == 0) { 2567 /* Keep the layer3 header aligned */ 2568 i = min(mc->m_pkthdr.len, max_protohdr); 2569 mc = m_copyup(mc, i, ETHER_ALIGN); 2570 if (mc == NULL) { 2571 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); 2572 continue; 2573 } 2574 } 2575 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) 2576 continue; 2577 if (mc == NULL) 2578 continue; 2579 } 2580 2581 bridge_enqueue(sc, dst_if, mc); 2582 } 2583 if (used == 0) 2584 m_freem(m); 2585 2586 out: 2587 BRIDGE_UNREF(sc); 2588 } 2589 2590 /* 2591 * bridge_span: 2592 * 2593 * Duplicate a packet out one or more interfaces that are in span mode, 2594 * the original mbuf is unmodified. 2595 */ 2596 static void 2597 bridge_span(struct bridge_softc *sc, struct mbuf *m) 2598 { 2599 struct bridge_iflist *bif; 2600 struct ifnet *dst_if; 2601 struct mbuf *mc; 2602 2603 if (LIST_EMPTY(&sc->sc_spanlist)) 2604 return; 2605 2606 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 2607 dst_if = bif->bif_ifp; 2608 2609 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2610 continue; 2611 2612 mc = m_copypacket(m, M_NOWAIT); 2613 if (mc == NULL) { 2614 if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); 2615 continue; 2616 } 2617 2618 bridge_enqueue(sc, dst_if, mc); 2619 } 2620 } 2621 2622 /* 2623 * bridge_rtupdate: 2624 * 2625 * Add a bridge routing entry. 2626 */ 2627 static int 2628 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, 2629 struct bridge_iflist *bif, int setflags, uint8_t flags) 2630 { 2631 struct bridge_rtnode *brt; 2632 int error; 2633 2634 BRIDGE_LOCK_ASSERT(sc); 2635 2636 /* Check the source address is valid and not multicast. 
*/ 2637 if (ETHER_IS_MULTICAST(dst) || 2638 (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 && 2639 dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) 2640 return (EINVAL); 2641 2642 /* 802.1p frames map to vlan 1 */ 2643 if (vlan == 0) 2644 vlan = 1; 2645 2646 /* 2647 * A route for this destination might already exist. If so, 2648 * update it, otherwise create a new one. 2649 */ 2650 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) { 2651 if (sc->sc_brtcnt >= sc->sc_brtmax) { 2652 sc->sc_brtexceeded++; 2653 return (ENOSPC); 2654 } 2655 /* Check per interface address limits (if enabled) */ 2656 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) { 2657 bif->bif_addrexceeded++; 2658 return (ENOSPC); 2659 } 2660 2661 /* 2662 * Allocate a new bridge forwarding node, and 2663 * initialize the expiration time and Ethernet 2664 * address. 2665 */ 2666 brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO); 2667 if (brt == NULL) 2668 return (ENOMEM); 2669 2670 if (bif->bif_flags & IFBIF_STICKY) 2671 brt->brt_flags = IFBAF_STICKY; 2672 else 2673 brt->brt_flags = IFBAF_DYNAMIC; 2674 2675 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN); 2676 brt->brt_vlan = vlan; 2677 2678 if ((error = bridge_rtnode_insert(sc, brt)) != 0) { 2679 uma_zfree(bridge_rtnode_zone, brt); 2680 return (error); 2681 } 2682 brt->brt_dst = bif; 2683 bif->bif_addrcnt++; 2684 } 2685 2686 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 2687 brt->brt_dst != bif) { 2688 brt->brt_dst->bif_addrcnt--; 2689 brt->brt_dst = bif; 2690 brt->brt_dst->bif_addrcnt++; 2691 } 2692 2693 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2694 brt->brt_expire = time_uptime + sc->sc_brttimeout; 2695 if (setflags) 2696 brt->brt_flags = flags; 2697 2698 return (0); 2699 } 2700 2701 /* 2702 * bridge_rtlookup: 2703 * 2704 * Lookup the destination interface for an address. 2705 */ 2706 static struct ifnet * 2707 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2708 { 2709 struct bridge_rtnode *brt; 2710 2711 BRIDGE_LOCK_ASSERT(sc); 2712 2713 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) 2714 return (NULL); 2715 2716 return (brt->brt_ifp); 2717 } 2718 2719 /* 2720 * bridge_rttrim: 2721 * 2722 * Trim the routine table so that we have a number 2723 * of routing entries less than or equal to the 2724 * maximum number. 2725 */ 2726 static void 2727 bridge_rttrim(struct bridge_softc *sc) 2728 { 2729 struct bridge_rtnode *brt, *nbrt; 2730 2731 BRIDGE_LOCK_ASSERT(sc); 2732 2733 /* Make sure we actually need to do this. */ 2734 if (sc->sc_brtcnt <= sc->sc_brtmax) 2735 return; 2736 2737 /* Force an aging cycle; this might trim enough addresses. */ 2738 bridge_rtage(sc); 2739 if (sc->sc_brtcnt <= sc->sc_brtmax) 2740 return; 2741 2742 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2743 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { 2744 bridge_rtnode_destroy(sc, brt); 2745 if (sc->sc_brtcnt <= sc->sc_brtmax) 2746 return; 2747 } 2748 } 2749 } 2750 2751 /* 2752 * bridge_timer: 2753 * 2754 * Aging timer for the bridge. 2755 */ 2756 static void 2757 bridge_timer(void *arg) 2758 { 2759 struct bridge_softc *sc = arg; 2760 2761 BRIDGE_LOCK_ASSERT(sc); 2762 2763 bridge_rtage(sc); 2764 2765 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) 2766 callout_reset(&sc->sc_brcallout, 2767 bridge_rtable_prune_period * hz, bridge_timer, sc); 2768 } 2769 2770 /* 2771 * bridge_rtage: 2772 * 2773 * Perform an aging cycle. 
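 *
 *	Example timeline (illustrative, using the default 20 minute
 *	timeout): a dynamic entry refreshed by bridge_rtupdate() at time t
 *	carries brt_expire = t + sc_brttimeout; once time_uptime passes
 *	that value, the next aging cycle run from bridge_timer() destroys
 *	the entry. Static and sticky entries are never aged here.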
2774  */
2775 static void
2776 bridge_rtage(struct bridge_softc *sc)
2777 {
2778 	struct bridge_rtnode *brt, *nbrt;
2779 
2780 	BRIDGE_LOCK_ASSERT(sc);
2781 
2782 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2783 		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
2784 			if (time_uptime >= brt->brt_expire)
2785 				bridge_rtnode_destroy(sc, brt);
2786 		}
2787 	}
2788 }
2789 
2790 /*
2791  * bridge_rtflush:
2792  *
2793  *	Remove all dynamic addresses from the bridge.
2794  */
2795 static void
2796 bridge_rtflush(struct bridge_softc *sc, int full)
2797 {
2798 	struct bridge_rtnode *brt, *nbrt;
2799 
2800 	BRIDGE_LOCK_ASSERT(sc);
2801 
2802 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2803 		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
2804 			bridge_rtnode_destroy(sc, brt);
2805 	}
2806 }
2807 
2808 /*
2809  * bridge_rtdaddr:
2810  *
2811  *	Remove an address from the table.
2812  */
2813 static int
2814 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
2815 {
2816 	struct bridge_rtnode *brt;
2817 	int found = 0;
2818 
2819 	BRIDGE_LOCK_ASSERT(sc);
2820 
2821 	/*
2822 	 * If vlan is zero then we want to delete for all vlans so the lookup
2823 	 * may return more than one.
2824 	 */
2825 	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
2826 		bridge_rtnode_destroy(sc, brt);
2827 		found = 1;
2828 	}
2829 
2830 	return (found ? 0 : ENOENT);
2831 }
2832 
2833 /*
2834  * bridge_rtdelete:
2835  *
2836  *	Delete routes to a specific member interface.
2837  */
2838 static void
2839 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
2840 {
2841 	struct bridge_rtnode *brt, *nbrt;
2842 
2843 	BRIDGE_LOCK_ASSERT(sc);
2844 
2845 	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
2846 		if (brt->brt_ifp == ifp && (full ||
2847 		    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
2848 			bridge_rtnode_destroy(sc, brt);
2849 	}
2850 }
2851 
2852 /*
2853  * bridge_rtable_init:
2854  *
2855  *	Initialize the route table for this bridge.
2856  */
2857 static void
2858 bridge_rtable_init(struct bridge_softc *sc)
2859 {
2860 	int i;
2861 
2862 	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
2863 	    M_DEVBUF, M_WAITOK);
2864 
2865 	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
2866 		LIST_INIT(&sc->sc_rthash[i]);
2867 
2868 	sc->sc_rthash_key = arc4random();
2869 	LIST_INIT(&sc->sc_rtlist);
2870 }
2871 
2872 /*
2873  * bridge_rtable_fini:
2874  *
2875  *	Deconstruct the route table for this bridge.
2876  */
2877 static void
2878 bridge_rtable_fini(struct bridge_softc *sc)
2879 {
2880 
2881 	KASSERT(sc->sc_brtcnt == 0,
2882 	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
2883 	free(sc->sc_rthash, M_DEVBUF);
2884 }
2885 
2886 /*
2887  * The following hash function is adapted from "Hash Functions" by Bob Jenkins
2888  * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
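 *
 * For illustration (a sketch of bridge_rthash() below): the six bytes of
 * the Ethernet address are folded into the 32-bit values a and b, c is
 * seeded with the per-bridge random key, and a single round of mix()
 * scrambles them, i.e. roughly
 *
 *	a = 0x9e3779b9 + (addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0])
 *	b = 0x9e3779b9 + (addr[5] << 8 | addr[4])
 *	c = sc->sc_rthash_key
 *	mix(a, b, c)
 *	bucket = c & BRIDGE_RTHASH_MASK	(0..1023 with the default table size)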
2889 */ 2890 #define mix(a, b, c) \ 2891 do { \ 2892 a -= b; a -= c; a ^= (c >> 13); \ 2893 b -= c; b -= a; b ^= (a << 8); \ 2894 c -= a; c -= b; c ^= (b >> 13); \ 2895 a -= b; a -= c; a ^= (c >> 12); \ 2896 b -= c; b -= a; b ^= (a << 16); \ 2897 c -= a; c -= b; c ^= (b >> 5); \ 2898 a -= b; a -= c; a ^= (c >> 3); \ 2899 b -= c; b -= a; b ^= (a << 10); \ 2900 c -= a; c -= b; c ^= (b >> 15); \ 2901 } while (/*CONSTCOND*/0) 2902 2903 static __inline uint32_t 2904 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr) 2905 { 2906 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key; 2907 2908 b += addr[5] << 8; 2909 b += addr[4]; 2910 a += addr[3] << 24; 2911 a += addr[2] << 16; 2912 a += addr[1] << 8; 2913 a += addr[0]; 2914 2915 mix(a, b, c); 2916 2917 return (c & BRIDGE_RTHASH_MASK); 2918 } 2919 2920 #undef mix 2921 2922 static int 2923 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b) 2924 { 2925 int i, d; 2926 2927 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) { 2928 d = ((int)a[i]) - ((int)b[i]); 2929 } 2930 2931 return (d); 2932 } 2933 2934 /* 2935 * bridge_rtnode_lookup: 2936 * 2937 * Look up a bridge route node for the specified destination. Compare the 2938 * vlan id or if zero then just return the first match. 2939 */ 2940 static struct bridge_rtnode * 2941 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2942 { 2943 struct bridge_rtnode *brt; 2944 uint32_t hash; 2945 int dir; 2946 2947 BRIDGE_LOCK_ASSERT(sc); 2948 2949 hash = bridge_rthash(sc, addr); 2950 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) { 2951 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr); 2952 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) 2953 return (brt); 2954 if (dir > 0) 2955 return (NULL); 2956 } 2957 2958 return (NULL); 2959 } 2960 2961 /* 2962 * bridge_rtnode_insert: 2963 * 2964 * Insert the specified bridge node into the route table. We 2965 * assume the entry is not already in the table. 2966 */ 2967 static int 2968 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt) 2969 { 2970 struct bridge_rtnode *lbrt; 2971 uint32_t hash; 2972 int dir; 2973 2974 BRIDGE_LOCK_ASSERT(sc); 2975 2976 hash = bridge_rthash(sc, brt->brt_addr); 2977 2978 lbrt = LIST_FIRST(&sc->sc_rthash[hash]); 2979 if (lbrt == NULL) { 2980 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash); 2981 goto out; 2982 } 2983 2984 do { 2985 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr); 2986 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) 2987 return (EEXIST); 2988 if (dir > 0) { 2989 LIST_INSERT_BEFORE(lbrt, brt, brt_hash); 2990 goto out; 2991 } 2992 if (LIST_NEXT(lbrt, brt_hash) == NULL) { 2993 LIST_INSERT_AFTER(lbrt, brt, brt_hash); 2994 goto out; 2995 } 2996 lbrt = LIST_NEXT(lbrt, brt_hash); 2997 } while (lbrt != NULL); 2998 2999 #ifdef DIAGNOSTIC 3000 panic("bridge_rtnode_insert: impossible"); 3001 #endif 3002 3003 out: 3004 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list); 3005 sc->sc_brtcnt++; 3006 3007 return (0); 3008 } 3009 3010 /* 3011 * bridge_rtnode_destroy: 3012 * 3013 * Destroy a bridge rtnode. 3014 */ 3015 static void 3016 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt) 3017 { 3018 BRIDGE_LOCK_ASSERT(sc); 3019 3020 LIST_REMOVE(brt, brt_hash); 3021 3022 LIST_REMOVE(brt, brt_list); 3023 sc->sc_brtcnt--; 3024 brt->brt_dst->bif_addrcnt--; 3025 uma_zfree(bridge_rtnode_zone, brt); 3026 } 3027 3028 /* 3029 * bridge_rtable_expire: 3030 * 3031 * Set the expiry time for all routes on an interface. 
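 *
 *	For illustration: bridge_rtable_expire(ifp, 0) removes every
 *	dynamic entry learned on ifp immediately, while a non-zero age
 *	(say, 15) only caps brt_expire to time_uptime + 15 for entries
 *	that would otherwise live longer, leaving them to be reclaimed by
 *	a subsequent aging cycle; static and sticky entries are untouched
 *	in either case.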
3032 */ 3033 static void 3034 bridge_rtable_expire(struct ifnet *ifp, int age) 3035 { 3036 struct bridge_softc *sc = ifp->if_bridge; 3037 struct bridge_rtnode *brt; 3038 3039 BRIDGE_LOCK(sc); 3040 3041 /* 3042 * If the age is zero then flush, otherwise set all the expiry times to 3043 * age for the interface 3044 */ 3045 if (age == 0) 3046 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN); 3047 else { 3048 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 3049 /* Cap the expiry time to 'age' */ 3050 if (brt->brt_ifp == ifp && 3051 brt->brt_expire > time_uptime + age && 3052 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 3053 brt->brt_expire = time_uptime + age; 3054 } 3055 } 3056 BRIDGE_UNLOCK(sc); 3057 } 3058 3059 /* 3060 * bridge_state_change: 3061 * 3062 * Callback from the bridgestp code when a port changes states. 3063 */ 3064 static void 3065 bridge_state_change(struct ifnet *ifp, int state) 3066 { 3067 struct bridge_softc *sc = ifp->if_bridge; 3068 static const char *stpstates[] = { 3069 "disabled", 3070 "listening", 3071 "learning", 3072 "forwarding", 3073 "blocking", 3074 "discarding" 3075 }; 3076 3077 CURVNET_SET(ifp->if_vnet); 3078 if (V_log_stp) 3079 log(LOG_NOTICE, "%s: state changed to %s on %s\n", 3080 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname); 3081 CURVNET_RESTORE(); 3082 } 3083 3084 /* 3085 * Send bridge packets through pfil if they are one of the types pfil can deal 3086 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without 3087 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for 3088 * that interface. 3089 */ 3090 static int 3091 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) 3092 { 3093 int snap, error, i, hlen; 3094 struct ether_header *eh1, eh2; 3095 struct ip *ip; 3096 struct llc llc1; 3097 u_int16_t ether_type; 3098 3099 snap = 0; 3100 error = -1; /* Default error if not error == 0 */ 3101 3102 #if 0 3103 /* we may return with the IP fields swapped, ensure its not shared */ 3104 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__)); 3105 #endif 3106 3107 if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0) 3108 return (0); /* filtering is disabled */ 3109 3110 i = min((*mp)->m_pkthdr.len, max_protohdr); 3111 if ((*mp)->m_len < i) { 3112 *mp = m_pullup(*mp, i); 3113 if (*mp == NULL) { 3114 printf("%s: m_pullup failed\n", __func__); 3115 return (-1); 3116 } 3117 } 3118 3119 eh1 = mtod(*mp, struct ether_header *); 3120 ether_type = ntohs(eh1->ether_type); 3121 3122 /* 3123 * Check for SNAP/LLC. 3124 */ 3125 if (ether_type < ETHERMTU) { 3126 struct llc *llc2 = (struct llc *)(eh1 + 1); 3127 3128 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 && 3129 llc2->llc_dsap == LLC_SNAP_LSAP && 3130 llc2->llc_ssap == LLC_SNAP_LSAP && 3131 llc2->llc_control == LLC_UI) { 3132 ether_type = htons(llc2->llc_un.type_snap.ether_type); 3133 snap = 1; 3134 } 3135 } 3136 3137 /* 3138 * If we're trying to filter bridge traffic, don't look at anything 3139 * other than IP and ARP traffic. If the filter doesn't understand 3140 * IPv6, don't allow IPv6 through the bridge either. This is lame 3141 * since if we really wanted, say, an AppleTalk filter, we are hosed, 3142 * but of course we don't have an AppleTalk filter to begin with. 3143 * (Note that since pfil doesn't understand ARP it will pass *ALL* 3144 * ARP traffic.) 
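	 *
	 * In short (an informal summary of the checks that follow):
	 *
	 *	ETHERTYPE_ARP, ETHERTYPE_REVARP	passed untouched unless
	 *					V_pfil_ipfw_arp is set
	 *	ETHERTYPE_IP, ETHERTYPE_IPV6	run through the pfil(9) hooks
	 *	anything else			dropped if V_pfil_onlyip is
	 *					set, otherwise passed through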
3145 */ 3146 switch (ether_type) { 3147 case ETHERTYPE_ARP: 3148 case ETHERTYPE_REVARP: 3149 if (V_pfil_ipfw_arp == 0) 3150 return (0); /* Automatically pass */ 3151 break; 3152 3153 case ETHERTYPE_IP: 3154 #ifdef INET6 3155 case ETHERTYPE_IPV6: 3156 #endif /* INET6 */ 3157 break; 3158 default: 3159 /* 3160 * Check to see if the user wants to pass non-ip 3161 * packets, these will not be checked by pfil(9) and 3162 * passed unconditionally so the default is to drop. 3163 */ 3164 if (V_pfil_onlyip) 3165 goto bad; 3166 } 3167 3168 /* Run the packet through pfil before stripping link headers */ 3169 if (PFIL_HOOKED(&V_link_pfil_hook) && V_pfil_ipfw != 0 && 3170 dir == PFIL_OUT && ifp != NULL) { 3171 3172 error = pfil_run_hooks(&V_link_pfil_hook, mp, ifp, dir, NULL); 3173 3174 if (*mp == NULL || error != 0) /* packet consumed by filter */ 3175 return (error); 3176 } 3177 3178 /* Strip off the Ethernet header and keep a copy. */ 3179 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2); 3180 m_adj(*mp, ETHER_HDR_LEN); 3181 3182 /* Strip off snap header, if present */ 3183 if (snap) { 3184 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1); 3185 m_adj(*mp, sizeof(struct llc)); 3186 } 3187 3188 /* 3189 * Check the IP header for alignment and errors 3190 */ 3191 if (dir == PFIL_IN) { 3192 switch (ether_type) { 3193 case ETHERTYPE_IP: 3194 error = bridge_ip_checkbasic(mp); 3195 break; 3196 #ifdef INET6 3197 case ETHERTYPE_IPV6: 3198 error = bridge_ip6_checkbasic(mp); 3199 break; 3200 #endif /* INET6 */ 3201 default: 3202 error = 0; 3203 } 3204 if (error) 3205 goto bad; 3206 } 3207 3208 error = 0; 3209 3210 /* 3211 * Run the packet through pfil 3212 */ 3213 switch (ether_type) { 3214 case ETHERTYPE_IP: 3215 /* 3216 * Run pfil on the member interface and the bridge, both can 3217 * be skipped by clearing pfil_member or pfil_bridge. 3218 * 3219 * Keep the order: 3220 * in_if -> bridge_if -> out_if 3221 */ 3222 if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3223 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3224 dir, NULL); 3225 3226 if (*mp == NULL || error != 0) /* filter may consume */ 3227 break; 3228 3229 if (V_pfil_member && ifp != NULL) 3230 error = pfil_run_hooks(&V_inet_pfil_hook, mp, ifp, 3231 dir, NULL); 3232 3233 if (*mp == NULL || error != 0) /* filter may consume */ 3234 break; 3235 3236 if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL) 3237 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3238 dir, NULL); 3239 3240 if (*mp == NULL || error != 0) /* filter may consume */ 3241 break; 3242 3243 /* check if we need to fragment the packet */ 3244 /* bridge_fragment generates a mbuf chain of packets */ 3245 /* that already include eth headers */ 3246 if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) { 3247 i = (*mp)->m_pkthdr.len; 3248 if (i > ifp->if_mtu) { 3249 error = bridge_fragment(ifp, mp, &eh2, snap, 3250 &llc1); 3251 return (error); 3252 } 3253 } 3254 3255 /* Recalculate the ip checksum. 
*/ 3256 ip = mtod(*mp, struct ip *); 3257 hlen = ip->ip_hl << 2; 3258 if (hlen < sizeof(struct ip)) 3259 goto bad; 3260 if (hlen > (*mp)->m_len) { 3261 if ((*mp = m_pullup(*mp, hlen)) == NULL) 3262 goto bad; 3263 ip = mtod(*mp, struct ip *); 3264 if (ip == NULL) 3265 goto bad; 3266 } 3267 ip->ip_sum = 0; 3268 if (hlen == sizeof(struct ip)) 3269 ip->ip_sum = in_cksum_hdr(ip); 3270 else 3271 ip->ip_sum = in_cksum(*mp, hlen); 3272 3273 break; 3274 #ifdef INET6 3275 case ETHERTYPE_IPV6: 3276 if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3277 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3278 dir, NULL); 3279 3280 if (*mp == NULL || error != 0) /* filter may consume */ 3281 break; 3282 3283 if (V_pfil_member && ifp != NULL) 3284 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, ifp, 3285 dir, NULL); 3286 3287 if (*mp == NULL || error != 0) /* filter may consume */ 3288 break; 3289 3290 if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL) 3291 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3292 dir, NULL); 3293 break; 3294 #endif 3295 default: 3296 error = 0; 3297 break; 3298 } 3299 3300 if (*mp == NULL) 3301 return (error); 3302 if (error != 0) 3303 goto bad; 3304 3305 error = -1; 3306 3307 /* 3308 * Finally, put everything back the way it was and return 3309 */ 3310 if (snap) { 3311 M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT); 3312 if (*mp == NULL) 3313 return (error); 3314 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc)); 3315 } 3316 3317 M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT); 3318 if (*mp == NULL) 3319 return (error); 3320 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3321 3322 return (0); 3323 3324 bad: 3325 m_freem(*mp); 3326 *mp = NULL; 3327 return (error); 3328 } 3329 3330 /* 3331 * Perform basic checks on header size since 3332 * pfil assumes ip_input has already processed 3333 * it for it. Cut-and-pasted from ip_input.c. 3334 * Given how simple the IPv6 version is, 3335 * does the IPv4 version really need to be 3336 * this complicated? 3337 * 3338 * XXX Should we update ipstat here, or not? 3339 * XXX Right now we update ipstat but not 3340 * XXX csum_counter. 
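 *
 * The checks below amount to (informal summary): the header must be
 * 4-byte aligned (m_copyup() otherwise), fully contained in the first
 * mbuf, have ip_v == IPVERSION, a header length of at least
 * sizeof(struct ip), a valid header checksum, and an ip_len that is
 * neither smaller than the header nor larger than the data actually
 * present in the mbuf chain.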
3341  */
3342 static int
3343 bridge_ip_checkbasic(struct mbuf **mp)
3344 {
3345 	struct mbuf *m = *mp;
3346 	struct ip *ip;
3347 	int len, hlen;
3348 	u_short sum;
3349 
3350 	if (*mp == NULL)
3351 		return (-1);
3352 
3353 	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
3354 		if ((m = m_copyup(m, sizeof(struct ip),
3355 		    (max_linkhdr + 3) & ~3)) == NULL) {
3356 			/* XXXJRT new stat, please */
3357 			KMOD_IPSTAT_INC(ips_toosmall);
3358 			goto bad;
3359 		}
3360 	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
3361 		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
3362 			KMOD_IPSTAT_INC(ips_toosmall);
3363 			goto bad;
3364 		}
3365 	}
3366 	ip = mtod(m, struct ip *);
3367 	if (ip == NULL) goto bad;
3368 
3369 	if (ip->ip_v != IPVERSION) {
3370 		KMOD_IPSTAT_INC(ips_badvers);
3371 		goto bad;
3372 	}
3373 	hlen = ip->ip_hl << 2;
3374 	if (hlen < sizeof(struct ip)) { /* minimum header length */
3375 		KMOD_IPSTAT_INC(ips_badhlen);
3376 		goto bad;
3377 	}
3378 	if (hlen > m->m_len) {
3379 		if ((m = m_pullup(m, hlen)) == NULL) {
3380 			KMOD_IPSTAT_INC(ips_badhlen);
3381 			goto bad;
3382 		}
3383 		ip = mtod(m, struct ip *);
3384 		if (ip == NULL) goto bad;
3385 	}
3386 
3387 	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
3388 		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
3389 	} else {
3390 		if (hlen == sizeof(struct ip)) {
3391 			sum = in_cksum_hdr(ip);
3392 		} else {
3393 			sum = in_cksum(m, hlen);
3394 		}
3395 	}
3396 	if (sum) {
3397 		KMOD_IPSTAT_INC(ips_badsum);
3398 		goto bad;
3399 	}
3400 
3401 	/* Retrieve the packet length. */
3402 	len = ntohs(ip->ip_len);
3403 
3404 	/*
3405 	 * Check for additional length bogosity
3406 	 */
3407 	if (len < hlen) {
3408 		KMOD_IPSTAT_INC(ips_badlen);
3409 		goto bad;
3410 	}
3411 
3412 	/*
3413 	 * Check that the amount of data in the buffers
3414 	 * is at least as much as the IP header would have us expect.
3415 	 * Drop packet if shorter than we expect.
3416 	 */
3417 	if (m->m_pkthdr.len < len) {
3418 		KMOD_IPSTAT_INC(ips_tooshort);
3419 		goto bad;
3420 	}
3421 
3422 	/* Checks out, proceed */
3423 	*mp = m;
3424 	return (0);
3425 
3426 bad:
3427 	*mp = m;
3428 	return (-1);
3429 }
3430 
3431 #ifdef INET6
3432 /*
3433  * Same as above, but for IPv6.
3434  * Cut-and-pasted from ip6_input.c.
3435  * XXX Should we update ip6stat, or not?
3436  */
3437 static int
3438 bridge_ip6_checkbasic(struct mbuf **mp)
3439 {
3440 	struct mbuf *m = *mp;
3441 	struct ip6_hdr *ip6;
3442 
3443 	/*
3444 	 * If the IPv6 header is not aligned, slurp it up into a new
3445 	 * mbuf with space for link headers, in the event we forward
3446 	 * it. Otherwise, if it is aligned, make sure the entire base
3447 	 * IPv6 header is in the first mbuf of the chain.
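	 *
	 *	Worked example (illustrative): m_copyup() below is asked for
	 *	sizeof(struct ip6_hdr) bytes with a leading gap of
	 *	(max_linkhdr + 3) & ~3, i.e. max_linkhdr rounded up to a
	 *	multiple of four; with max_linkhdr == 14 that is a 16 byte
	 *	gap, which keeps the IPv6 header 4-byte aligned while leaving
	 *	room to prepend a link-level header later.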
3448 */ 3449 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3450 struct ifnet *inifp = m->m_pkthdr.rcvif; 3451 if ((m = m_copyup(m, sizeof(struct ip6_hdr), 3452 (max_linkhdr + 3) & ~3)) == NULL) { 3453 /* XXXJRT new stat, please */ 3454 IP6STAT_INC(ip6s_toosmall); 3455 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3456 goto bad; 3457 } 3458 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) { 3459 struct ifnet *inifp = m->m_pkthdr.rcvif; 3460 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { 3461 IP6STAT_INC(ip6s_toosmall); 3462 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3463 goto bad; 3464 } 3465 } 3466 3467 ip6 = mtod(m, struct ip6_hdr *); 3468 3469 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { 3470 IP6STAT_INC(ip6s_badvers); 3471 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); 3472 goto bad; 3473 } 3474 3475 /* Checks out, proceed */ 3476 *mp = m; 3477 return (0); 3478 3479 bad: 3480 *mp = m; 3481 return (-1); 3482 } 3483 #endif /* INET6 */ 3484 3485 /* 3486 * bridge_fragment: 3487 * 3488 * Fragment mbuf chain in multiple packets and prepend ethernet header. 3489 */ 3490 static int 3491 bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh, 3492 int snap, struct llc *llc) 3493 { 3494 struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL; 3495 struct ip *ip; 3496 int error = -1; 3497 3498 if (m->m_len < sizeof(struct ip) && 3499 (m = m_pullup(m, sizeof(struct ip))) == NULL) 3500 goto dropit; 3501 ip = mtod(m, struct ip *); 3502 3503 m->m_pkthdr.csum_flags |= CSUM_IP; 3504 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist); 3505 if (error) 3506 goto dropit; 3507 3508 /* 3509 * Walk the chain and re-add the Ethernet header for 3510 * each mbuf packet. 3511 */ 3512 for (mcur = m; mcur; mcur = mcur->m_nextpkt) { 3513 nextpkt = mcur->m_nextpkt; 3514 mcur->m_nextpkt = NULL; 3515 if (snap) { 3516 M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT); 3517 if (mcur == NULL) { 3518 error = ENOBUFS; 3519 if (mprev != NULL) 3520 mprev->m_nextpkt = nextpkt; 3521 goto dropit; 3522 } 3523 bcopy(llc, mtod(mcur, caddr_t),sizeof(struct llc)); 3524 } 3525 3526 M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT); 3527 if (mcur == NULL) { 3528 error = ENOBUFS; 3529 if (mprev != NULL) 3530 mprev->m_nextpkt = nextpkt; 3531 goto dropit; 3532 } 3533 bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN); 3534 3535 /* 3536 * The previous two M_PREPEND could have inserted one or two 3537 * mbufs in front so we have to update the previous packet's 3538 * m_nextpkt. 3539 */ 3540 mcur->m_nextpkt = nextpkt; 3541 if (mprev != NULL) 3542 mprev->m_nextpkt = mcur; 3543 else { 3544 /* The first mbuf in the original chain needs to be 3545 * updated. 
			 */
3546 			*mp = mcur;
3547 		}
3548 		mprev = mcur;
3549 	}
3550 
3551 	KMOD_IPSTAT_INC(ips_fragmented);
3552 	return (error);
3553 
3554 dropit:
3555 	for (mcur = *mp; mcur; mcur = m) {	/* dropping the full packet chain */
3556 		m = mcur->m_nextpkt;
3557 		m_freem(mcur);
3558 	}
3559 	return (error);
3560 }
3561 
3562 static void
3563 bridge_linkstate(struct ifnet *ifp)
3564 {
3565 	struct bridge_softc *sc = ifp->if_bridge;
3566 	struct bridge_iflist *bif;
3567 
3568 	BRIDGE_LOCK(sc);
3569 	bif = bridge_lookup_member_if(sc, ifp);
3570 	if (bif == NULL) {
3571 		BRIDGE_UNLOCK(sc);
3572 		return;
3573 	}
3574 	bridge_linkcheck(sc);
3575 	BRIDGE_UNLOCK(sc);
3576 
3577 	bstp_linkstate(&bif->bif_stp);
3578 }
3579 
3580 static void
3581 bridge_linkcheck(struct bridge_softc *sc)
3582 {
3583 	struct bridge_iflist *bif;
3584 	int new_link, hasls;
3585 
3586 	BRIDGE_LOCK_ASSERT(sc);
3587 	new_link = LINK_STATE_DOWN;
3588 	hasls = 0;
3589 	/* Our link is considered up if at least one of our ports is active */
3590 	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
3591 		if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
3592 			hasls++;
3593 		if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
3594 			new_link = LINK_STATE_UP;
3595 			break;
3596 		}
3597 	}
3598 	if (!LIST_EMPTY(&sc->sc_iflist) && !hasls) {
3599 		/* If no interfaces support link-state then we default to up */
3600 		new_link = LINK_STATE_UP;
3601 	}
3602 	if_link_state_change(sc->sc_ifp, new_link);
3603 }
3604 
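/*
 * Illustrative decision table for bridge_linkcheck() above:
 *
 *	member list	any member link up	any IFCAP_LINKSTATE	result
 *	empty		-			-			DOWN
 *	non-empty	yes			-			UP
 *	non-empty	no			yes			DOWN
 *	non-empty	no			no			UP
 */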