/*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
 */

/*
 * Network interface bridge support.
 *
 * TODO:
 *
 *	- Currently only supports Ethernet-like interfaces (Ethernet,
 *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
 *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
 *	  consider heterogeneous bridges).
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_carp.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <sys/ctype.h>  /* string functions */
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <vm/uma.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/pfil.h>

#include <netinet/in.h> /* for struct arpcom */
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#ifdef DEV_CARP
#include <netinet/ip_carp.h>
#endif
#include <machine/in_cksum.h>
#include <netinet/if_ether.h> /* for struct arpcom */
#include <net/bridgestp.h>
#include <net/if_bridgevar.h>
#include <net/if_llc.h>
#include <net/if_vlan_var.h>

#include <net/route.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>

/*
 * Size of the route hash table.  Must be a power of two.
 */
#ifndef BRIDGE_RTHASH_SIZE
#define	BRIDGE_RTHASH_SIZE	1024
#endif

#define	BRIDGE_RTHASH_MASK	(BRIDGE_RTHASH_SIZE - 1)

/*
 * Maximum number of addresses to cache.
 */
#ifndef BRIDGE_RTABLE_MAX
#define	BRIDGE_RTABLE_MAX	100
#endif

/*
 * Timeout (in seconds) for entries learned dynamically.
 */
#ifndef BRIDGE_RTABLE_TIMEOUT
#define	BRIDGE_RTABLE_TIMEOUT	(20 * 60)	/* same as ARP */
#endif

/*
 * Number of seconds between walks of the route list.
 */
#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
#define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
#endif

/*
 * List of capabilities to mask on the member interface.
 */
#define	BRIDGE_IFCAPS_MASK	IFCAP_TXCSUM

/*
 * Bridge interface list entry.
 */
struct bridge_iflist {
	LIST_ENTRY(bridge_iflist) bif_next;
	struct ifnet		*bif_ifp;	/* member if */
	struct bstp_port	bif_stp;	/* STP state */
	uint32_t		bif_flags;	/* member if flags */
	int			bif_mutecap;	/* member muted caps */
	uint32_t		bif_addrmax;	/* max # of addresses */
	uint32_t		bif_addrcnt;	/* cur. # of addresses */
	uint32_t		bif_addrexceeded;/* # of address violations */
};

/*
 * Bridge route node.
 */
struct bridge_rtnode {
	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
	struct bridge_iflist	*brt_dst;	/* destination if */
	unsigned long		brt_expire;	/* expiration time */
	uint8_t			brt_flags;	/* address flags */
	uint8_t			brt_addr[ETHER_ADDR_LEN];
	uint16_t		brt_vlan;	/* vlan id */
};
#define	brt_ifp			brt_dst->bif_ifp

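/*
 * Each bridge_rtnode is linked twice: brt_hash puts the node on one bucket
 * of the per-bridge hash table (sc_rthash) for address lookups, while
 * brt_list puts it on sc_rtlist so that routines such as bridge_rtage()
 * can walk every entry without scanning all of the hash buckets.
 */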
/*
 * Software state for each bridge.
 */
struct bridge_softc {
	struct ifnet		*sc_ifp;	/* make this an interface */
	LIST_ENTRY(bridge_softc) sc_list;
	struct mtx		sc_mtx;
	struct cv		sc_cv;
	uint32_t		sc_brtmax;	/* max # of addresses */
	uint32_t		sc_brtcnt;	/* cur. # of addresses */
	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
	struct callout		sc_brcallout;	/* bridge callout */
	uint32_t		sc_iflist_ref;	/* refcount for sc_iflist */
	uint32_t		sc_iflist_xcnt;	/* refcount for sc_iflist */
	LIST_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
	LIST_HEAD(, bridge_rtnode) *sc_rthash;	/* our forwarding table */
	LIST_HEAD(, bridge_rtnode) sc_rtlist;	/* list version of above */
	uint32_t		sc_rthash_key;	/* key for hash */
	LIST_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
	struct bstp_state	sc_stp;		/* STP state */
	uint32_t		sc_brtexceeded;	/* # of cache drops */
};

static struct mtx	bridge_list_mtx;
eventhandler_tag	bridge_detach_cookie = NULL;

int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;

uma_zone_t bridge_rtnode_zone;

static int	bridge_clone_create(struct if_clone *, int, caddr_t);
static void	bridge_clone_destroy(struct ifnet *);

static int	bridge_ioctl(struct ifnet *, u_long, caddr_t);
static void	bridge_mutecaps(struct bridge_iflist *, int);
static void	bridge_ifdetach(void *arg __unused, struct ifnet *);
static void	bridge_init(void *);
static void	bridge_dummynet(struct mbuf *, struct ifnet *);
static void	bridge_stop(struct ifnet *, int);
static void	bridge_start(struct ifnet *);
static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
static int	bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *);
static void	bridge_enqueue(struct bridge_softc *, struct ifnet *,
		    struct mbuf *);
static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);

static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
		    struct mbuf *m);

static void	bridge_timer(void *);

static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
		    struct mbuf *, int);
static void	bridge_span(struct bridge_softc *, struct mbuf *);

static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
		    uint16_t, struct bridge_iflist *, int, uint8_t);
static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
		    uint16_t);
static void	bridge_rttrim(struct bridge_softc *);
static void	bridge_rtage(struct bridge_softc *);
static void	bridge_rtflush(struct bridge_softc *, int);
static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
		    uint16_t);

static int	bridge_rtable_init(struct bridge_softc *);
static void	bridge_rtable_fini(struct bridge_softc *);

static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
		    const uint8_t *, uint16_t);
static int	bridge_rtnode_insert(struct bridge_softc *,
		    struct bridge_rtnode *);
static void	bridge_rtnode_destroy(struct bridge_softc *,
		    struct bridge_rtnode *);
static void	bridge_rtable_expire(struct ifnet *, int);
static void	bridge_state_change(struct ifnet *, int);

static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
		    const char *name);
static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
		    struct ifnet *ifp);
static void	bridge_delete_member(struct bridge_softc *,
		    struct bridge_iflist *, int);
static void	bridge_delete_span(struct bridge_softc *,
		    struct bridge_iflist *);

static int	bridge_ioctl_add(struct bridge_softc *, void *);
static int	bridge_ioctl_del(struct bridge_softc *, void *);
static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
static int	bridge_ioctl_scache(struct bridge_softc *, void *);
static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
static int	bridge_ioctl_gifs(struct bridge_softc *, void *);
static int	bridge_ioctl_rts(struct bridge_softc *, void *);
static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
static int	bridge_ioctl_sto(struct bridge_softc *, void *);
static int	bridge_ioctl_gto(struct bridge_softc *, void *);
static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
static int	bridge_ioctl_flush(struct bridge_softc *, void *);
static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
static int	bridge_ioctl_spri(struct bridge_softc *, void *);
static int	bridge_ioctl_ght(struct bridge_softc *, void *);
static int	bridge_ioctl_sht(struct bridge_softc *, void *);
static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
static int	bridge_ioctl_gma(struct bridge_softc *, void *);
static int	bridge_ioctl_sma(struct bridge_softc *, void *);
static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
static int	bridge_ioctl_gbparam(struct bridge_softc *, void *);
static int	bridge_ioctl_grte(struct bridge_softc *, void *);
static int	bridge_ioctl_gifsstp(struct bridge_softc *, void *);
static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
		    int);
static int	bridge_ip_checkbasic(struct mbuf **mp);
#ifdef INET6
static int	bridge_ip6_checkbasic(struct mbuf **mp);
#endif /* INET6 */
static int	bridge_fragment(struct ifnet *, struct mbuf *,
		    struct ether_header *, int, struct llc *);

/* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */
#define	VLANTAGOF(_m)	\
	(_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1
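/*
 * For example, a frame received with an 802.1Q tag for VLAN 5 (and the
 * M_VLANTAG flag set) yields VLANTAGOF(m) == 5, while an untagged frame
 * falls back to the default VLAN 1 mentioned above.
 */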

static struct bstp_cb_ops bridge_ops = {
	.bcb_state = bridge_state_change,
	.bcb_rtage = bridge_rtable_expire
};

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge");

static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
static int pfil_member = 1; /* run pfil hooks on the member interface */
static int pfil_ipfw = 0;   /* layer2 filter with ipfw */
static int pfil_ipfw_arp = 0;	/* layer2 filter with ipfw */
static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for
				   locally destined packets */
static int log_stp = 0;		/* log STP state changes */
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW,
    &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW,
    &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW,
    &pfil_bridge, 0, "Packet filter on the bridge interface");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW,
    &pfil_member, 0, "Packet filter on the member interface");
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW,
    &pfil_local_phys, 0,
    "Packet filter on the physical interface for locally destined packets");
SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
    &log_stp, 0, "Log STP state changes");

struct bridge_control {
	int	(*bc_func)(struct bridge_softc *, void *);
	int	bc_argsize;
	int	bc_flags;
};

#define	BC_F_COPYIN		0x01	/* copy arguments in */
#define	BC_F_COPYOUT		0x02	/* copy arguments out */
#define	BC_F_SUSER		0x04	/* do super-user check */

const struct bridge_control bridge_control_table[] = {
	{ bridge_ioctl_add,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_del,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_gifflags,	sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_sifflags,	sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_scache,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gcache,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },

	{ bridge_ioctl_gifs,		sizeof(struct ifbifconf),
	  BC_F_COPYIN|BC_F_COPYOUT },
	{ bridge_ioctl_rts,		sizeof(struct ifbaconf),
	  BC_F_COPYIN|BC_F_COPYOUT },

	{ bridge_ioctl_saddr,		sizeof(struct ifbareq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_sto,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_gto,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },

	{ bridge_ioctl_daddr,		sizeof(struct ifbareq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_flush,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_gpri,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_spri,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_ght,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sht,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
	  BC_F_COPYOUT },

	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },

	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
	  BC_F_COPYIN|BC_F_COPYOUT },

	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

};
const int bridge_control_table_size =
    sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);
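/*
 * The table above is indexed directly by the ifd_cmd value carried by
 * SIOCGDRVSPEC/SIOCSDRVSPEC (see bridge_ioctl() below), so the order of the
 * entries must match the BRDG* command numbers defined in net/if_bridgevar.h.
 */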

LIST_HEAD(, bridge_softc) bridge_list;

IFC_SIMPLE_DECLARE(bridge, 0);

static int
bridge_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF);
		if_clone_attach(&bridge_cloner);
		bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
		    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		LIST_INIT(&bridge_list);
		bridge_input_p = bridge_input;
		bridge_output_p = bridge_output;
		bridge_dn_p = bridge_dummynet;
		bridge_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, bridge_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    bridge_detach_cookie);
		if_clone_detach(&bridge_cloner);
		uma_zdestroy(bridge_rtnode_zone);
		bridge_input_p = NULL;
		bridge_output_p = NULL;
		bridge_dn_p = NULL;
		mtx_destroy(&bridge_list_mtx);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t bridge_mod = {
	"if_bridge",
	bridge_modevent,
	0
};

DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);

/*
 * handler for net.link.bridge.pfil_ipfw
 */
static int
sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
{
	int enable = pfil_ipfw;
	int error;

	error = sysctl_handle_int(oidp, &enable, 0, req);
	enable = (enable) ? 1 : 0;

	if (enable != pfil_ipfw) {
		pfil_ipfw = enable;

		/*
		 * Disable pfil so that ipfw doesn't run twice; if the user
		 * really wants both, they can re-enable pfil_bridge and/or
		 * pfil_member.  Also allow non-IP packets, as ipfw can filter
		 * by layer2 type.
		 */
		if (pfil_ipfw) {
			pfil_onlyip = 0;
			pfil_bridge = 0;
			pfil_member = 0;
		}
	}

	return (error);
}
SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");

/*
 * bridge_clone_create:
 *
 *	Create a new bridge instance.
 */
static int
bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct bridge_softc *sc, *sc2;
	struct ifnet *bifp, *ifp;
	u_char eaddr[6];
	int retry;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}

	BRIDGE_LOCK_INIT(sc);
	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;

	/* Initialize our routing table. */
	bridge_rtable_init(sc);

	callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0);

	LIST_INIT(&sc->sc_iflist);
	LIST_INIT(&sc->sc_spanlist);

	ifp->if_softc = sc;
	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = bridge_ioctl;
	ifp->if_start = bridge_start;
	ifp->if_init = bridge_init;
	ifp->if_type = IFT_BRIDGE;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Generate a random Ethernet address with the locally administered
	 * bit set.
	 *
	 * Since we are using random Ethernet addresses for the bridge, it is
	 * possible that we might have address collisions, so make sure that
	 * this hardware address isn't already in use on another bridge.
	 */
	for (retry = 1; retry != 0;) {
		arc4rand(eaddr, ETHER_ADDR_LEN, 1);
		eaddr[0] &= ~1;		/* clear multicast bit */
		eaddr[0] |= 2;		/* set the LAA bit */
		retry = 0;
		mtx_lock(&bridge_list_mtx);
		LIST_FOREACH(sc2, &bridge_list, sc_list) {
			bifp = sc2->sc_ifp;
			if (memcmp(eaddr, IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0)
				retry = 1;
		}
		mtx_unlock(&bridge_list_mtx);
	}

	bstp_attach(&sc->sc_stp, &bridge_ops);
	ether_ifattach(ifp, eaddr);
	/* Now undo some of the damage... */
	ifp->if_baudrate = 0;
	ifp->if_type = IFT_BRIDGE;

	mtx_lock(&bridge_list_mtx);
	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
	mtx_unlock(&bridge_list_mtx);

	return (0);
}

/*
 * bridge_clone_destroy:
 *
 *	Destroy a bridge instance.
 */
static void
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_iflist *bif;

	BRIDGE_LOCK(sc);

	bridge_stop(ifp, 1);
	ifp->if_flags &= ~IFF_UP;

	while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
		bridge_delete_member(sc, bif, 0);

	while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) {
		bridge_delete_span(sc, bif);
	}

	BRIDGE_UNLOCK(sc);

	callout_drain(&sc->sc_brcallout);

	mtx_lock(&bridge_list_mtx);
	LIST_REMOVE(sc, sc_list);
	mtx_unlock(&bridge_list_mtx);

	bstp_detach(&sc->sc_stp);
	ether_ifdetach(ifp);
	if_free_type(ifp, IFT_ETHER);

	/* Tear down the routing table. */
	bridge_rtable_fini(sc);

	BRIDGE_LOCK_DESTROY(sc);
	free(sc, M_DEVBUF);
}

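/*
 * Bridge-specific requests arrive below as SIOCGDRVSPEC/SIOCSDRVSPEC with
 * the command number in ifd_cmd.  bridge_ioctl() copies the argument in
 * (BC_F_COPYIN), runs the handler from bridge_control_table with the bridge
 * lock held, and copies the result back out (BC_F_COPYOUT).  Note that some
 * handlers, such as bridge_ioctl_gifs(), temporarily drop the lock around
 * M_WAITOK allocations and copyout().
 */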
/*
 * bridge_ioctl:
 *
 *	Handle a control request from the operator.
 */
static int
bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct thread *td = curthread;
	union {
		struct ifbreq ifbreq;
		struct ifbifconf ifbifconf;
		struct ifbareq ifbareq;
		struct ifbaconf ifbaconf;
		struct ifbrparam ifbrparam;
		struct ifbropreq ifbropreq;
	} args;
	struct ifdrv *ifd = (struct ifdrv *) data;
	const struct bridge_control *bc;
	int error = 0;

	switch (cmd) {

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		if (ifd->ifd_cmd >= bridge_control_table_size) {
			error = EINVAL;
			break;
		}
		bc = &bridge_control_table[ifd->ifd_cmd];

		if (cmd == SIOCGDRVSPEC &&
		    (bc->bc_flags & BC_F_COPYOUT) == 0) {
			error = EINVAL;
			break;
		}
		else if (cmd == SIOCSDRVSPEC &&
		    (bc->bc_flags & BC_F_COPYOUT) != 0) {
			error = EINVAL;
			break;
		}

		if (bc->bc_flags & BC_F_SUSER) {
			error = priv_check(td, PRIV_NET_BRIDGE);
			if (error)
				break;
		}

		if (ifd->ifd_len != bc->bc_argsize ||
		    ifd->ifd_len > sizeof(args)) {
			error = EINVAL;
			break;
		}

		bzero(&args, sizeof(args));
		if (bc->bc_flags & BC_F_COPYIN) {
			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
			if (error)
				break;
		}

		BRIDGE_LOCK(sc);
		error = (*bc->bc_func)(sc, &args);
		BRIDGE_UNLOCK(sc);
		if (error)
			break;

		if (bc->bc_flags & BC_F_COPYOUT)
			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);

		break;

	case SIOCSIFFLAGS:
		if (!(ifp->if_flags & IFF_UP) &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			BRIDGE_LOCK(sc);
			bridge_stop(ifp, 1);
			BRIDGE_UNLOCK(sc);
		} else if ((ifp->if_flags & IFF_UP) &&
		    !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			(*ifp->if_init)(sc);
		}
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed on the bridge */
		error = EINVAL;
		break;

	default:
		/*
		 * drop the lock as ether_ioctl() will call bridge_start() and
		 * cause the lock to be recursed.
		 */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * bridge_mutecaps:
 *
 *	Clear or restore unwanted capabilities on the member interface
 */
static void
bridge_mutecaps(struct bridge_iflist *bif, int mute)
{
	struct ifnet *ifp = bif->bif_ifp;
	struct ifreq ifr;
	int error;

	if (ifp->if_ioctl == NULL)
		return;

	bzero(&ifr, sizeof(ifr));
	ifr.ifr_reqcap = ifp->if_capenable;

	if (mute) {
		/* mask off and save capabilities */
		bif->bif_mutecap = ifr.ifr_reqcap & BRIDGE_IFCAPS_MASK;
		if (bif->bif_mutecap != 0)
			ifr.ifr_reqcap &= ~BRIDGE_IFCAPS_MASK;
	} else
		/* restore muted capabilities */
		ifr.ifr_reqcap |= bif->bif_mutecap;


	if (bif->bif_mutecap != 0) {
		IFF_LOCKGIANT(ifp);
		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
		IFF_UNLOCKGIANT(ifp);
	}
}

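/*
 * BRIDGE_IFCAPS_MASK currently covers only IFCAP_TXCSUM, presumably because
 * a frame that relies on transmit checksum offload may be forwarded out a
 * different member that cannot complete the checksum; muting the capability
 * keeps members from handing the bridge such frames.
 */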
/*
 * bridge_lookup_member:
 *
 *	Look up a bridge member interface.
 */
static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
{
	struct bridge_iflist *bif;
	struct ifnet *ifp;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifp = bif->bif_ifp;
		if (strcmp(ifp->if_xname, name) == 0)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_lookup_member_if:
 *
 *	Look up a bridge member interface by ifnet*.
 */
static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
{
	struct bridge_iflist *bif;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_ifp == member_ifp)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_delete_member:
 *
 *	Delete the specified member interface.
 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
    int gone)
{
	struct ifnet *ifs = bif->bif_ifp;

	BRIDGE_LOCK_ASSERT(sc);

	if (!gone) {
		switch (ifs->if_type) {
		case IFT_ETHER:
		case IFT_L2VLAN:
			/*
			 * Take the interface out of promiscuous mode.
			 */
			(void) ifpromisc(ifs, 0);
			bridge_mutecaps(bif, 0);
			break;

		case IFT_GIF:
			break;

		default:
#ifdef DIAGNOSTIC
			panic("bridge_delete_member: impossible");
#endif
			break;
		}
	}

	if (bif->bif_flags & IFBIF_STP)
		bstp_disable(&bif->bif_stp);

	ifs->if_bridge = NULL;
	BRIDGE_XLOCK(sc);
	LIST_REMOVE(bif, bif_next);
	BRIDGE_XDROP(sc);

	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
	KASSERT(bif->bif_addrcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

	BRIDGE_UNLOCK(sc);
	bstp_destroy(&bif->bif_stp);	/* prepare to free */
	BRIDGE_LOCK(sc);
	free(bif, M_DEVBUF);
}

/*
 * bridge_delete_span:
 *
 *	Delete the specified span interface.
 */
static void
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
{
	BRIDGE_LOCK_ASSERT(sc);

	KASSERT(bif->bif_ifp->if_bridge == NULL,
	    ("%s: not a span interface", __func__));

	LIST_REMOVE(bif, bif_next);
	free(bif, M_DEVBUF);
}

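/*
 * The handlers below back the ifconfig(8) bridge commands; for example,
 * "ifconfig bridge0 addm em0" typically reaches bridge_ioctl_add() through
 * the BRDGADD entry of bridge_control_table.
 */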
static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs;
	int error = 0;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);

	/* If it's in the span list, it can't be a member. */
	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			return (EBUSY);

	/* Allow the first Ethernet member to define the MTU */
	if (ifs->if_type != IFT_GIF) {
		if (LIST_EMPTY(&sc->sc_iflist))
			sc->sc_ifp->if_mtu = ifs->if_mtu;
		else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
			    ifs->if_xname);
			return (EINVAL);
		}
	}

	if (ifs->if_bridge == sc)
		return (EEXIST);

	if (ifs->if_bridge != NULL)
		return (EBUSY);

	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (bif == NULL)
		return (ENOMEM);

	bif->bif_ifp = ifs;
	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;

	switch (ifs->if_type) {
	case IFT_ETHER:
	case IFT_L2VLAN:
		/*
		 * Place the interface into promiscuous mode.
		 */
		error = ifpromisc(ifs, 1);
		if (error)
			goto out;

		bridge_mutecaps(bif, 1);
		break;

	case IFT_GIF:
		break;

	default:
		error = EINVAL;
		goto out;
	}

	ifs->if_bridge = sc;
	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
	/*
	 * XXX: XLOCK HERE!?!
	 *
	 * NOTE: insert_***HEAD*** should be safe for the traversals.
	 */
	LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);

out:
	if (error) {
		if (bif != NULL)
			free(bif, M_DEVBUF);
	}
	return (error);
}

static int
bridge_ioctl_del(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	bridge_delete_member(sc, bif, 0);

	return (0);
}

static int
bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;
	struct bstp_port *bp;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	bp = &bif->bif_stp;
	req->ifbr_ifsflags = bif->bif_flags;
	req->ifbr_state = bp->bp_state;
	req->ifbr_priority = bp->bp_priority;
	req->ifbr_path_cost = bp->bp_path_cost;
	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
	req->ifbr_proto = bp->bp_protover;
	req->ifbr_role = bp->bp_role;
	req->ifbr_stpflags = bp->bp_flags;
	req->ifbr_addrcnt = bif->bif_addrcnt;
	req->ifbr_addrmax = bif->bif_addrmax;
	req->ifbr_addrexceeded = bif->bif_addrexceeded;

	/* Copy STP state options as flags */
	if (bp->bp_operedge)
		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
	if (bp->bp_ptp_link)
		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
	if (bp->bp_flags & BSTP_PORT_ADMCOST)
		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
	return (0);
}

static int
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;
	struct bstp_port *bp;
	int error;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);
	bp = &bif->bif_stp;

	if (req->ifbr_ifsflags & IFBIF_SPAN)
		/* SPAN is readonly */
		return (EINVAL);

	if (req->ifbr_ifsflags & IFBIF_STP) {
		if ((bif->bif_flags & IFBIF_STP) == 0) {
			error = bstp_enable(&bif->bif_stp);
			if (error)
				return (error);
		}
	} else {
		if ((bif->bif_flags & IFBIF_STP) != 0)
			bstp_disable(&bif->bif_stp);
	}

	/* Pass on STP flags */
	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);

	/* Save the bits relating to the bridge */
	bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;

	return (0);
}

static int
bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	sc->sc_brtmax = param->ifbrp_csize;
	bridge_rttrim(sc);

	return (0);
}

static int
bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	param->ifbrp_csize = sc->sc_brtmax;

	return (0);
}

static int
bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
{
	struct ifbifconf *bifc = arg;
	struct bridge_iflist *bif;
	struct ifbreq breq;
	char *buf, *outbuf;
	int count, buflen, len, error = 0;

	count = 0;
	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
		count++;
	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		count++;

	buflen = sizeof(breq) * count;
	if (bifc->ifbic_len == 0) {
		bifc->ifbic_len = buflen;
		return (0);
	}
	BRIDGE_UNLOCK(sc);
	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
	BRIDGE_LOCK(sc);

	count = 0;
	buf = outbuf;
	len = min(bifc->ifbic_len, buflen);
	bzero(&breq, sizeof(breq));
	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (len < sizeof(breq))
			break;

		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
		    sizeof(breq.ifbr_ifsname));
		/* Fill in the ifbreq structure */
		error = bridge_ioctl_gifflags(sc, &breq);
		if (error)
			break;
		memcpy(buf, &breq, sizeof(breq));
		count++;
		buf += sizeof(breq);
		len -= sizeof(breq);
	}
	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
		if (len < sizeof(breq))
			break;

		strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
		    sizeof(breq.ifbr_ifsname));
		breq.ifbr_ifsflags = bif->bif_flags;
		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
		memcpy(buf, &breq, sizeof(breq));
		count++;
		buf += sizeof(breq);
		len -= sizeof(breq);
	}

	BRIDGE_UNLOCK(sc);
	bifc->ifbic_len = sizeof(breq) * count;
	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
	BRIDGE_LOCK(sc);
	free(outbuf, M_TEMP);
	return (error);
}

static int
bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
{
	struct ifbaconf *bac = arg;
	struct bridge_rtnode *brt;
	struct ifbareq bareq;
	char *buf, *outbuf;
	int count, buflen, len, error = 0;

	if (bac->ifbac_len == 0)
		return (0);

	count = 0;
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
		count++;
	buflen = sizeof(bareq) * count;

	BRIDGE_UNLOCK(sc);
	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
	BRIDGE_LOCK(sc);

	count = 0;
	buf = outbuf;
	len = min(bac->ifbac_len, buflen);
	bzero(&bareq, sizeof(bareq));
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
		if (len < sizeof(bareq))
			goto out;
		strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
		    sizeof(bareq.ifba_ifsname));
		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
		bareq.ifba_vlan = brt->brt_vlan;
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
		    time_uptime < brt->brt_expire)
			bareq.ifba_expire = brt->brt_expire - time_uptime;
		else
			bareq.ifba_expire = 0;
		bareq.ifba_flags = brt->brt_flags;

		memcpy(buf, &bareq, sizeof(bareq));
		count++;
		buf += sizeof(bareq);
		len -= sizeof(bareq);
	}
out:
	BRIDGE_UNLOCK(sc);
	bac->ifbac_len = sizeof(bareq) * count;
	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
	BRIDGE_LOCK(sc);
	free(outbuf, M_TEMP);
	return (error);
}

static int
bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
{
	struct ifbareq *req = arg;
	struct bridge_iflist *bif;
	int error;

	bif = bridge_lookup_member(sc, req->ifba_ifsname);
	if (bif == NULL)
		return (ENOENT);

	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
	    req->ifba_flags);

	return (error);
}

static int
bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	sc->sc_brttimeout = param->ifbrp_ctime;
	return (0);
}

static int
bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	param->ifbrp_ctime = sc->sc_brttimeout;
	return (0);
}

static int
bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
{
	struct ifbareq *req = arg;

	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
}

static int
bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;

	bridge_rtflush(sc, req->ifbr_ifsflags);
	return (0);
}

static int
bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;
	struct bstp_state *bs = &sc->sc_stp;

	param->ifbrp_prio = bs->bs_bridge_priority;
	return (0);
}

static int
bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
}

static int
bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;
	struct bstp_state *bs = &sc->sc_stp;

	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
	return (0);
}

static int
bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
}

static int
bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;
	struct bstp_state *bs = &sc->sc_stp;

	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
	return (0);
}

static int
bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
}

static int
bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;
	struct bstp_state *bs = &sc->sc_stp;

	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
	return (0);
}

static int
bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
}

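/*
 * The ">> 8" in the hello time, forward delay and max age getters above
 * converts the bstp(4) internal representation (which appears to keep these
 * timers scaled by 256, following the 802.1D on-the-wire encoding) back into
 * whole seconds for the ifbrparam interface.
 */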
static int
bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
}

static int
bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
}

static int
bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	bif->bif_addrmax = req->ifbr_addrmax;
	return (0);
}

static int
bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);

	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			return (EBUSY);

	if (ifs->if_bridge != NULL)
		return (EBUSY);

	switch (ifs->if_type) {
	case IFT_ETHER:
	case IFT_GIF:
	case IFT_L2VLAN:
		break;
	default:
		return (EINVAL);
	}

	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (bif == NULL)
		return (ENOMEM);

	bif->bif_ifp = ifs;
	bif->bif_flags = IFBIF_SPAN;

	LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);

	return (0);
}

static int
bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;
	struct ifnet *ifs;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);

	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			break;

	if (bif == NULL)
		return (ENOENT);

	bridge_delete_span(sc, bif);

	return (0);
}

static int
bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
{
	struct ifbropreq *req = arg;
	struct bstp_state *bs = &sc->sc_stp;
	struct bstp_port *root_port;

	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;

	root_port = bs->bs_root_port;
	if (root_port == NULL)
		req->ifbop_root_port = 0;
	else
		req->ifbop_root_port = root_port->bp_ifp->if_index;

	req->ifbop_holdcount = bs->bs_txholdcount;
	req->ifbop_priority = bs->bs_bridge_priority;
	req->ifbop_protocol = bs->bs_protover;
	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;

	return (0);
}

static int
bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	param->ifbrp_cexceeded = sc->sc_brtexceeded;
	return (0);
}

static int
bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
{
	struct ifbpstpconf *bifstp = arg;
	struct bridge_iflist *bif;
	struct bstp_port *bp;
	struct ifbpstpreq bpreq;
	char *buf, *outbuf;
	int count, buflen, len, error = 0;

	count = 0;
	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if ((bif->bif_flags & IFBIF_STP) != 0)
			count++;
	}

	buflen = sizeof(bpreq) * count;
	if (bifstp->ifbpstp_len == 0) {
		bifstp->ifbpstp_len = buflen;
		return (0);
	}

	BRIDGE_UNLOCK(sc);
	outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
	BRIDGE_LOCK(sc);

	count = 0;
	buf = outbuf;
	len = min(bifstp->ifbpstp_len, buflen);
	bzero(&bpreq, sizeof(bpreq));
	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (len < sizeof(bpreq))
			break;

		if ((bif->bif_flags & IFBIF_STP) == 0)
			continue;

		bp = &bif->bif_stp;
		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;

		memcpy(buf, &bpreq, sizeof(bpreq));
		count++;
		buf += sizeof(bpreq);
		len -= sizeof(bpreq);
	}

	BRIDGE_UNLOCK(sc);
	bifstp->ifbpstp_len = sizeof(bpreq) * count;
	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
	BRIDGE_LOCK(sc);
	free(outbuf, M_TEMP);
	return (error);
}

static int
bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
}

static int
bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
}

/*
 * bridge_ifdetach:
 *
 *	Detach an interface from a bridge.  Called when a member
 *	interface is detaching.
 */
static void
bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *bif;

	/* Check if the interface is a bridge member */
	if (sc != NULL) {
		BRIDGE_LOCK(sc);

		bif = bridge_lookup_member_if(sc, ifp);
		if (bif != NULL)
			bridge_delete_member(sc, bif, 1);

		BRIDGE_UNLOCK(sc);
		return;
	}

	/* Check if the interface is a span port */
	mtx_lock(&bridge_list_mtx);
	LIST_FOREACH(sc, &bridge_list, sc_list) {
		BRIDGE_LOCK(sc);
		LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
			if (ifp == bif->bif_ifp) {
				bridge_delete_span(sc, bif);
				break;
			}

		BRIDGE_UNLOCK(sc);
	}
	mtx_unlock(&bridge_list_mtx);
}

/*
 * bridge_init:
 *
 *	Initialize a bridge interface.
 */
static void
bridge_init(void *xsc)
{
	struct bridge_softc *sc = (struct bridge_softc *)xsc;
	struct ifnet *ifp = sc->sc_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	BRIDGE_LOCK(sc);
	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
	    bridge_timer, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */

	BRIDGE_UNLOCK(sc);
}

/*
 * bridge_stop:
 *
 *	Stop the bridge interface.
 */
static void
bridge_stop(struct ifnet *ifp, int disable)
{
	struct bridge_softc *sc = ifp->if_softc;

	BRIDGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	callout_stop(&sc->sc_brcallout);
	bstp_stop(&sc->sc_stp);

	bridge_rtflush(sc, IFBF_FLUSHDYN);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
}

/*
 * bridge_enqueue:
 *
 *	Enqueue a packet on a bridge member interface.
 *
 */
static void
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
{
	int len, err = 0;
	short mflags;
	struct mbuf *m0;

	len = m->m_pkthdr.len;
	mflags = m->m_flags;

	/* We may be sending a fragment so traverse the mbuf */
	for (; m; m = m0) {
		m0 = m->m_nextpkt;
		m->m_nextpkt = NULL;

		/*
		 * If the underlying interface cannot do VLAN tag insertion
		 * itself, then attach a packet tag that holds it.
		 */
		if ((m->m_flags & M_VLANTAG) &&
		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
			if (m == NULL) {
				if_printf(dst_ifp,
				    "unable to prepend VLAN header\n");
				dst_ifp->if_oerrors++;
				continue;
			}
			m->m_flags &= ~M_VLANTAG;
		}

		if (err == 0)
			IFQ_ENQUEUE(&dst_ifp->if_snd, m, err);
	}

	if (err == 0) {

		sc->sc_ifp->if_opackets++;
		sc->sc_ifp->if_obytes += len;

		dst_ifp->if_obytes += len;

		if (mflags & M_MCAST) {
			sc->sc_ifp->if_omcasts++;
			dst_ifp->if_omcasts++;
		}
	}

	if ((dst_ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
		(*dst_ifp->if_start)(dst_ifp);
}

/*
 * bridge_dummynet:
 *
 *	Receive a queued packet from dummynet and pass it on to the output
 *	interface.
 *
 *	The mbuf has the Ethernet header already attached.
 */
static void
bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
{
	struct bridge_softc *sc;

	sc = ifp->if_bridge;

	/*
	 * The packet didn't originate from a member interface.  This should
	 * only ever happen if a member interface is removed while packets are
	 * queued for it.
	 */
	if (sc == NULL) {
		m_freem(m);
		return;
	}

	if (PFIL_HOOKED(&inet_pfil_hook)
#ifdef INET6
	    || PFIL_HOOKED(&inet6_pfil_hook)
#endif
	    ) {
		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	bridge_enqueue(sc, ifp, m);
}

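/*
 * bridge_dummynet() above is reached through the bridge_dn_p hook installed
 * by bridge_modevent() at MOD_LOAD time; dummynet(4) uses it to hand delayed
 * layer2 packets back to the bridge for transmission.
 */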
/*
 * bridge_output:
 *
 *	Send output from a bridge member interface.  This
 *	performs the bridging function for locally originated
 *	packets.
 *
 *	The mbuf has the Ethernet header already attached.  We must
 *	enqueue or free the mbuf before returning.
 */
static int
bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
    struct rtentry *rt)
{
	struct ether_header *eh;
	struct ifnet *dst_if;
	struct bridge_softc *sc;
	uint16_t vlan;

	if (m->m_len < ETHER_HDR_LEN) {
		m = m_pullup(m, ETHER_HDR_LEN);
		if (m == NULL)
			return (0);
	}

	eh = mtod(m, struct ether_header *);
	sc = ifp->if_bridge;
	vlan = VLANTAGOF(m);

	BRIDGE_LOCK(sc);

	/*
	 * If bridge is down, but the original output interface is up,
	 * go ahead and send out that interface.  Otherwise, the packet
	 * is dropped below.
	 */
	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		dst_if = ifp;
		goto sendunicast;
	}

	/*
	 * If the packet is a multicast, or we don't know a better way to
	 * get there, send to all interfaces.
	 */
	if (ETHER_IS_MULTICAST(eh->ether_dhost))
		dst_if = NULL;
	else
		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
	if (dst_if == NULL) {
		struct bridge_iflist *bif;
		struct mbuf *mc;
		int error = 0, used = 0;

		bridge_span(sc, m);

		BRIDGE_LOCK2REF(sc, error);
		if (error) {
			m_freem(m);
			return (0);
		}

		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
			dst_if = bif->bif_ifp;

			if (dst_if->if_type == IFT_GIF)
				continue;
			if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
				continue;

			/*
			 * If this is not the original output interface,
			 * and the interface is participating in spanning
			 * tree, make sure the port is in a state that
			 * allows forwarding.
			 */
			if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
				continue;

			if (LIST_NEXT(bif, bif_next) == NULL) {
				used = 1;
				mc = m;
			} else {
				mc = m_copypacket(m, M_DONTWAIT);
				if (mc == NULL) {
					sc->sc_ifp->if_oerrors++;
					continue;
				}
			}

			bridge_enqueue(sc, dst_if, mc);
		}
		if (used == 0)
			m_freem(m);
		BRIDGE_UNREF(sc);
		return (0);
	}

sendunicast:
	/*
	 * XXX Spanning tree consideration here?
	 */

	bridge_span(sc, m);
	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		m_freem(m);
		BRIDGE_UNLOCK(sc);
		return (0);
	}

	BRIDGE_UNLOCK(sc);
	bridge_enqueue(sc, dst_if, m);
	return (0);
}

/*
 * bridge_start:
 *
 *	Start output on a bridge.
 *
 */
static void
bridge_start(struct ifnet *ifp)
{
	struct bridge_softc *sc;
	struct mbuf *m;
	struct ether_header *eh;
	struct ifnet *dst_if;

	sc = ifp->if_softc;

	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		ETHER_BPF_MTAP(ifp, m);

		eh = mtod(m, struct ether_header *);
		dst_if = NULL;

		BRIDGE_LOCK(sc);
		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
			dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1);
		}

		if (dst_if == NULL)
			bridge_broadcast(sc, ifp, m, 0);
		else {
			BRIDGE_UNLOCK(sc);
			bridge_enqueue(sc, dst_if, m);
		}
	}
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

/*
 * bridge_forward:
 *
 *	The forwarding function of the bridge.
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
    struct mbuf *m)
{
	struct bridge_iflist *dbif;
	struct ifnet *src_if, *dst_if, *ifp;
	struct ether_header *eh;
	uint16_t vlan;
	uint8_t *dst;
	int error;

	src_if = m->m_pkthdr.rcvif;
	ifp = sc->sc_ifp;

	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;
	vlan = VLANTAGOF(m);

	if ((sbif->bif_flags & IFBIF_STP) &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
		goto drop;

	eh = mtod(m, struct ether_header *);
	dst = eh->ether_dhost;

	/* If the interface is learning, record the address. */
	if (sbif->bif_flags & IFBIF_LEARNING) {
		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
		    sbif, 0, IFBAF_DYNAMIC);
		/*
		 * If the interface has an address limit, deny any source
		 * that is not in the cache.
		 */
		if (error && sbif->bif_addrmax)
			goto drop;
	}

	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
		goto drop;

	/*
	 * At this point, the port either doesn't participate
	 * in spanning tree or it is in the forwarding state.
	 */

	/*
	 * If the packet is unicast, destined for someone on
	 * "this" side of the bridge, drop it.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
		dst_if = bridge_rtlookup(sc, dst, vlan);
		if (src_if == dst_if)
			goto drop;
	} else {
		/*
		 * Check if it's a reserved multicast address; any address
		 * listed in 802.1D section 7.12.6 may not be forwarded by the
		 * bridge.
		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
		 */
		if (dst[0] == 0x01 && dst[1] == 0x80 &&
		    dst[2] == 0xc2 && dst[3] == 0x00 &&
		    dst[4] == 0x00 && dst[5] <= 0x0f)
			goto drop;

		/* ...forward it to all interfaces. */
		ifp->if_imcasts++;
		dst_if = NULL;
	}

	/*
	 * If we have a destination interface which is a member of our bridge,
	 * OR this is a unicast packet, push it through the bpf(4) machinery.
	 * For broadcast or multicast packets, don't bother because it will
	 * be reinjected into ether_input.  We do this before we pass the
	 * packets through the pfil(9) framework, as it is possible that
	 * pfil(9) will drop the packet, or possibly modify it, making it
	 * difficult to debug firewall issues on the bridge.
	 */
	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
		ETHER_BPF_MTAP(ifp, m);

	/* run the packet filter */
	if (PFIL_HOOKED(&inet_pfil_hook)
#ifdef INET6
	    || PFIL_HOOKED(&inet6_pfil_hook)
#endif
	    ) {
		BRIDGE_UNLOCK(sc);
		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
			return;
		if (m == NULL)
			return;
		BRIDGE_LOCK(sc);
	}

	if (dst_if == NULL) {
		bridge_broadcast(sc, src_if, m, 1);
		return;
	}

	/*
	 * At this point, we're dealing with a unicast frame
	 * going to a different interface.
	 */
	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto drop;

	dbif = bridge_lookup_member_if(sc, dst_if);
	if (dbif == NULL)
		/* Not a member of the bridge (anymore?) */
		goto drop;

*/ 2032 goto drop; 2033 2034 /* Private segments can not talk to each other */ 2035 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE) 2036 goto drop; 2037 2038 if ((dbif->bif_flags & IFBIF_STP) && 2039 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2040 goto drop; 2041 2042 BRIDGE_UNLOCK(sc); 2043 2044 if (PFIL_HOOKED(&inet_pfil_hook) 2045 #ifdef INET6 2046 || PFIL_HOOKED(&inet6_pfil_hook) 2047 #endif 2048 ) { 2049 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0) 2050 return; 2051 if (m == NULL) 2052 return; 2053 } 2054 2055 bridge_enqueue(sc, dst_if, m); 2056 return; 2057 2058 drop: 2059 BRIDGE_UNLOCK(sc); 2060 m_freem(m); 2061 } 2062 2063 /* 2064 * bridge_input: 2065 * 2066 * Receive input from a member interface. Queue the packet for 2067 * bridging if it is not for us. 2068 */ 2069 static struct mbuf * 2070 bridge_input(struct ifnet *ifp, struct mbuf *m) 2071 { 2072 struct bridge_softc *sc = ifp->if_bridge; 2073 struct bridge_iflist *bif, *bif2; 2074 struct ifnet *bifp; 2075 struct ether_header *eh; 2076 struct mbuf *mc, *mc2; 2077 uint16_t vlan; 2078 int error; 2079 2080 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2081 return (m); 2082 2083 bifp = sc->sc_ifp; 2084 vlan = VLANTAGOF(m); 2085 2086 /* 2087 * Implement support for bridge monitoring. If this flag has been 2088 * set on this interface, discard the packet once we push it through 2089 * the bpf(4) machinery, but before we do, increment the byte and 2090 * packet counters associated with this interface. 2091 */ 2092 if ((bifp->if_flags & IFF_MONITOR) != 0) { 2093 m->m_pkthdr.rcvif = bifp; 2094 ETHER_BPF_MTAP(bifp, m); 2095 bifp->if_ipackets++; 2096 bifp->if_ibytes += m->m_pkthdr.len; 2097 m_freem(m); 2098 return (NULL); 2099 } 2100 BRIDGE_LOCK(sc); 2101 bif = bridge_lookup_member_if(sc, ifp); 2102 if (bif == NULL) { 2103 BRIDGE_UNLOCK(sc); 2104 return (m); 2105 } 2106 2107 eh = mtod(m, struct ether_header *); 2108 2109 bridge_span(sc, m); 2110 2111 if (m->m_flags & (M_BCAST|M_MCAST)) { 2112 /* Tap off 802.1D packets; they do not get forwarded. */ 2113 if (memcmp(eh->ether_dhost, bstp_etheraddr, 2114 ETHER_ADDR_LEN) == 0) { 2115 m = bstp_input(&bif->bif_stp, ifp, m); 2116 if (m == NULL) { 2117 BRIDGE_UNLOCK(sc); 2118 return (NULL); 2119 } 2120 } 2121 2122 if ((bif->bif_flags & IFBIF_STP) && 2123 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2124 BRIDGE_UNLOCK(sc); 2125 return (m); 2126 } 2127 2128 /* 2129 * Make a deep copy of the packet and enqueue the copy 2130 * for bridge processing; return the original packet for 2131 * local processing. 2132 */ 2133 mc = m_dup(m, M_DONTWAIT); 2134 if (mc == NULL) { 2135 BRIDGE_UNLOCK(sc); 2136 return (m); 2137 } 2138 2139 /* Perform the bridge forwarding function with the copy. */ 2140 bridge_forward(sc, bif, mc); 2141 2142 /* 2143 * Reinject the mbuf as arriving on the bridge so we have a 2144 * chance at claiming multicast packets. We can not loop back 2145 * here from ether_input as a bridge is never a member of a 2146 * bridge. 2147 */ 2148 KASSERT(bifp->if_bridge == NULL, 2149 ("loop created in bridge_input")); 2150 mc2 = m_dup(m, M_DONTWAIT); 2151 if (mc2 != NULL) { 2152 /* Keep the layer3 header aligned */ 2153 int i = min(mc2->m_pkthdr.len, max_protohdr); 2154 mc2 = m_copyup(mc2, i, ETHER_ALIGN); 2155 } 2156 if (mc2 != NULL) { 2157 mc2->m_pkthdr.rcvif = bifp; 2158 (*bifp->if_input)(bifp, mc2); 2159 } 2160 2161 /* Return the original packet for local processing. 
*/ 2162 return (m); 2163 } 2164 2165 if ((bif->bif_flags & IFBIF_STP) && 2166 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2167 BRIDGE_UNLOCK(sc); 2168 return (m); 2169 } 2170 2171 #ifdef DEV_CARP 2172 # define OR_CARP_CHECK_WE_ARE_DST(iface) \ 2173 || ((iface)->if_carp \ 2174 && carp_forus((iface)->if_carp, eh->ether_dhost)) 2175 # define OR_CARP_CHECK_WE_ARE_SRC(iface) \ 2176 || ((iface)->if_carp \ 2177 && carp_forus((iface)->if_carp, eh->ether_shost)) 2178 #else 2179 # define OR_CARP_CHECK_WE_ARE_DST(iface) 2180 # define OR_CARP_CHECK_WE_ARE_SRC(iface) 2181 #endif 2182 2183 #ifdef INET6 2184 # define OR_PFIL_HOOKED_INET6 \ 2185 || PFIL_HOOKED(&inet6_pfil_hook) 2186 #else 2187 # define OR_PFIL_HOOKED_INET6 2188 #endif 2189 2190 #define GRAB_OUR_PACKETS(iface) \ 2191 if ((iface)->if_type == IFT_GIF) \ 2192 continue; \ 2193 /* It is destined for us. */ \ 2194 if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \ 2195 OR_CARP_CHECK_WE_ARE_DST((iface)) \ 2196 ) { \ 2197 if ((iface)->if_type == IFT_BRIDGE) { \ 2198 ETHER_BPF_MTAP(iface, m); \ 2199 iface->if_ipackets++; \ 2200 /* Filter on the physical interface. */ \ 2201 if (pfil_local_phys && \ 2202 (PFIL_HOOKED(&inet_pfil_hook) \ 2203 OR_PFIL_HOOKED_INET6)) { \ 2204 if (bridge_pfil(&m, NULL, ifp, \ 2205 PFIL_IN) != 0 || m == NULL) { \ 2206 BRIDGE_UNLOCK(sc); \ 2207 return (NULL); \ 2208 } \ 2209 } \ 2210 } \ 2211 if (bif->bif_flags & IFBIF_LEARNING) { \ 2212 error = bridge_rtupdate(sc, eh->ether_shost, \ 2213 vlan, bif, 0, IFBAF_DYNAMIC); \ 2214 if (error && bif->bif_addrmax) { \ 2215 BRIDGE_UNLOCK(sc); \ 2216 m_freem(m); \ 2217 return (NULL); \ 2218 } \ 2219 } \ 2220 m->m_pkthdr.rcvif = iface; \ 2221 BRIDGE_UNLOCK(sc); \ 2222 return (m); \ 2223 } \ 2224 \ 2225 /* We just received a packet that we sent out. */ \ 2226 if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \ 2227 OR_CARP_CHECK_WE_ARE_SRC((iface)) \ 2228 ) { \ 2229 BRIDGE_UNLOCK(sc); \ 2230 m_freem(m); \ 2231 return (NULL); \ 2232 } 2233 2234 /* 2235 * Unicast. Make sure it's not for the bridge. 2236 */ 2237 do { GRAB_OUR_PACKETS(bifp) } while (0); 2238 2239 /* 2240 * Give a chance for ifp at first priority. This will help when the 2241 * packet comes through the interface like VLAN's with the same MACs 2242 * on several interfaces from the same bridge. This also will save 2243 * some CPU cycles in case the destination interface and the input 2244 * interface (eq ifp) are the same. 2245 */ 2246 do { GRAB_OUR_PACKETS(ifp) } while (0); 2247 2248 /* Now check the all bridge members. */ 2249 LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) { 2250 GRAB_OUR_PACKETS(bif2->bif_ifp) 2251 } 2252 2253 #undef OR_CARP_CHECK_WE_ARE_DST 2254 #undef OR_CARP_CHECK_WE_ARE_SRC 2255 #undef OR_PFIL_HOOKED_INET6 2256 #undef GRAB_OUR_PACKETS 2257 2258 /* Perform the bridge forwarding function. */ 2259 bridge_forward(sc, bif, m); 2260 2261 return (NULL); 2262 } 2263 2264 /* 2265 * bridge_broadcast: 2266 * 2267 * Send a frame to all interfaces that are members of 2268 * the bridge, except for the one on which the packet 2269 * arrived. 2270 * 2271 * NOTE: Releases the lock on return. 
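 *
 *	The bridge lock is exchanged for a reference (BRIDGE_LOCK2REF) while
 *	the copies are queued, so that members cannot be removed from the
 *	bridge until the reference is released.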
2272 */ 2273 static void 2274 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, 2275 struct mbuf *m, int runfilt) 2276 { 2277 struct bridge_iflist *dbif, *sbif; 2278 struct mbuf *mc; 2279 struct ifnet *dst_if; 2280 int error = 0, used = 0, i; 2281 2282 sbif = bridge_lookup_member_if(sc, src_if); 2283 2284 BRIDGE_LOCK2REF(sc, error); 2285 if (error) { 2286 m_freem(m); 2287 return; 2288 } 2289 2290 /* Filter on the bridge interface before broadcasting */ 2291 if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) 2292 #ifdef INET6 2293 || PFIL_HOOKED(&inet6_pfil_hook) 2294 #endif 2295 )) { 2296 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) 2297 goto out; 2298 if (m == NULL) 2299 goto out; 2300 } 2301 2302 LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) { 2303 dst_if = dbif->bif_ifp; 2304 if (dst_if == src_if) 2305 continue; 2306 2307 /* Private segments can not talk to each other */ 2308 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)) 2309 continue; 2310 2311 if ((dbif->bif_flags & IFBIF_STP) && 2312 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2313 continue; 2314 2315 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 && 2316 (m->m_flags & (M_BCAST|M_MCAST)) == 0) 2317 continue; 2318 2319 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2320 continue; 2321 2322 if (LIST_NEXT(dbif, bif_next) == NULL) { 2323 mc = m; 2324 used = 1; 2325 } else { 2326 mc = m_dup(m, M_DONTWAIT); 2327 if (mc == NULL) { 2328 sc->sc_ifp->if_oerrors++; 2329 continue; 2330 } 2331 } 2332 2333 /* 2334 * Filter on the output interface. Pass a NULL bridge interface 2335 * pointer so we do not redundantly filter on the bridge for 2336 * each interface we broadcast on. 2337 */ 2338 if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) 2339 #ifdef INET6 2340 || PFIL_HOOKED(&inet6_pfil_hook) 2341 #endif 2342 )) { 2343 if (used == 0) { 2344 /* Keep the layer3 header aligned */ 2345 i = min(mc->m_pkthdr.len, max_protohdr); 2346 mc = m_copyup(mc, i, ETHER_ALIGN); 2347 if (mc == NULL) { 2348 sc->sc_ifp->if_oerrors++; 2349 continue; 2350 } 2351 } 2352 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) 2353 continue; 2354 if (mc == NULL) 2355 continue; 2356 } 2357 2358 bridge_enqueue(sc, dst_if, mc); 2359 } 2360 if (used == 0) 2361 m_freem(m); 2362 2363 out: 2364 BRIDGE_UNREF(sc); 2365 } 2366 2367 /* 2368 * bridge_span: 2369 * 2370 * Duplicate a packet out one or more interfaces that are in span mode, 2371 * the original mbuf is unmodified. 2372 */ 2373 static void 2374 bridge_span(struct bridge_softc *sc, struct mbuf *m) 2375 { 2376 struct bridge_iflist *bif; 2377 struct ifnet *dst_if; 2378 struct mbuf *mc; 2379 2380 if (LIST_EMPTY(&sc->sc_spanlist)) 2381 return; 2382 2383 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 2384 dst_if = bif->bif_ifp; 2385 2386 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2387 continue; 2388 2389 mc = m_copypacket(m, M_DONTWAIT); 2390 if (mc == NULL) { 2391 sc->sc_ifp->if_oerrors++; 2392 continue; 2393 } 2394 2395 bridge_enqueue(sc, dst_if, mc); 2396 } 2397 } 2398 2399 /* 2400 * bridge_rtupdate: 2401 * 2402 * Add a bridge routing entry. 2403 */ 2404 static int 2405 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, 2406 struct bridge_iflist *bif, int setflags, uint8_t flags) 2407 { 2408 struct bridge_rtnode *brt; 2409 int error; 2410 2411 BRIDGE_LOCK_ASSERT(sc); 2412 2413 /* Check the source address is valid and not multicast. 
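	 * (i.e. reject group addresses and the all-zero address).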
	 */
	if (ETHER_IS_MULTICAST(dst) ||
	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	    dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
		return (EINVAL);

	/* 802.1p frames map to vlan 1 */
	if (vlan == 0)
		vlan = 1;

	/*
	 * A route for this destination might already exist.  If so,
	 * update it, otherwise create a new one.
	 */
	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
		if (sc->sc_brtcnt >= sc->sc_brtmax) {
			sc->sc_brtexceeded++;
			return (ENOSPC);
		}
		/* Check per interface address limits (if enabled) */
		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
			bif->bif_addrexceeded++;
			return (ENOSPC);
		}

		/*
		 * Allocate a new bridge forwarding node, and
		 * initialize the expiration time and Ethernet
		 * address.
		 */
		brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO);
		if (brt == NULL)
			return (ENOMEM);

		if (bif->bif_flags & IFBIF_STICKY)
			brt->brt_flags = IFBAF_STICKY;
		else
			brt->brt_flags = IFBAF_DYNAMIC;

		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
		brt->brt_vlan = vlan;

		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
			uma_zfree(bridge_rtnode_zone, brt);
			return (error);
		}
		brt->brt_dst = bif;
		bif->bif_addrcnt++;
	}

	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    brt->brt_dst != bif) {
		brt->brt_dst->bif_addrcnt--;
		brt->brt_dst = bif;
		brt->brt_dst->bif_addrcnt++;
	}

	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
		brt->brt_expire = time_uptime + sc->sc_brttimeout;
	if (setflags)
		brt->brt_flags = flags;

	return (0);
}

/*
 * bridge_rtlookup:
 *
 *	Lookup the destination interface for an address.
 */
static struct ifnet *
bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;

	BRIDGE_LOCK_ASSERT(sc);

	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
		return (NULL);

	return (brt->brt_ifp);
}

/*
 * bridge_rttrim:
 *
 *	Trim the routing table so that we have a number
 *	of routing entries less than or equal to the
 *	maximum number.
 */
static void
bridge_rttrim(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	/* Make sure we actually need to do this. */
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	/* Force an aging cycle; this might trim enough addresses. */
	bridge_rtage(sc);
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			bridge_rtnode_destroy(sc, brt);
			if (sc->sc_brtcnt <= sc->sc_brtmax)
				return;
		}
	}
}

/*
 * bridge_timer:
 *
 *	Aging timer for the bridge.
 */
static void
bridge_timer(void *arg)
{
	struct bridge_softc *sc = arg;

	BRIDGE_LOCK_ASSERT(sc);

	bridge_rtage(sc);

	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
		callout_reset(&sc->sc_brcallout,
		    bridge_rtable_prune_period * hz, bridge_timer, sc);
}

/*
 * bridge_rtage:
 *
 *	Perform an aging cycle.
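 *
 *	Only dynamic entries whose expiration time has passed are removed;
 *	static and sticky entries are never aged out.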
 */
static void
bridge_rtage(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			if (time_uptime >= brt->brt_expire)
				bridge_rtnode_destroy(sc, brt);
		}
	}
}

/*
 * bridge_rtflush:
 *
 *	Remove all dynamic addresses from the bridge.
 */
static void
bridge_rtflush(struct bridge_softc *sc, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
			bridge_rtnode_destroy(sc, brt);
	}
}

/*
 * bridge_rtdaddr:
 *
 *	Remove an address from the table.
 */
static int
bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;
	int found = 0;

	BRIDGE_LOCK_ASSERT(sc);

	/*
	 * If vlan is zero then we want to delete for all vlans so the lookup
	 * may return more than one.
	 */
	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
		bridge_rtnode_destroy(sc, brt);
		found = 1;
	}

	return (found ? 0 : ENOENT);
}

/*
 * bridge_rtdelete:
 *
 *	Delete routes to a specific member interface.
 */
static void
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (brt->brt_ifp == ifp && (full ||
		    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
			bridge_rtnode_destroy(sc, brt);
	}
}

/*
 * bridge_rtable_init:
 *
 *	Initialize the route table for this bridge.
 */
static int
bridge_rtable_init(struct bridge_softc *sc)
{
	int i;

	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_rthash == NULL)
		return (ENOMEM);

	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
		LIST_INIT(&sc->sc_rthash[i]);

	sc->sc_rthash_key = arc4random();

	LIST_INIT(&sc->sc_rtlist);

	return (0);
}

/*
 * bridge_rtable_fini:
 *
 *	Deconstruct the route table for this bridge.
 */
static void
bridge_rtable_fini(struct bridge_softc *sc)
{

	KASSERT(sc->sc_brtcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
	free(sc->sc_rthash, M_DEVBUF);
}

/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
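 *
 * A per-bridge random key (sc_rthash_key) is mixed into the hash so the
 * bucket distribution is not easily predictable from outside; the result
 * is masked with BRIDGE_RTHASH_MASK to select a bucket.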
2671 */ 2672 #define mix(a, b, c) \ 2673 do { \ 2674 a -= b; a -= c; a ^= (c >> 13); \ 2675 b -= c; b -= a; b ^= (a << 8); \ 2676 c -= a; c -= b; c ^= (b >> 13); \ 2677 a -= b; a -= c; a ^= (c >> 12); \ 2678 b -= c; b -= a; b ^= (a << 16); \ 2679 c -= a; c -= b; c ^= (b >> 5); \ 2680 a -= b; a -= c; a ^= (c >> 3); \ 2681 b -= c; b -= a; b ^= (a << 10); \ 2682 c -= a; c -= b; c ^= (b >> 15); \ 2683 } while (/*CONSTCOND*/0) 2684 2685 static __inline uint32_t 2686 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr) 2687 { 2688 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key; 2689 2690 b += addr[5] << 8; 2691 b += addr[4]; 2692 a += addr[3] << 24; 2693 a += addr[2] << 16; 2694 a += addr[1] << 8; 2695 a += addr[0]; 2696 2697 mix(a, b, c); 2698 2699 return (c & BRIDGE_RTHASH_MASK); 2700 } 2701 2702 #undef mix 2703 2704 static int 2705 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b) 2706 { 2707 int i, d; 2708 2709 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) { 2710 d = ((int)a[i]) - ((int)b[i]); 2711 } 2712 2713 return (d); 2714 } 2715 2716 /* 2717 * bridge_rtnode_lookup: 2718 * 2719 * Look up a bridge route node for the specified destination. Compare the 2720 * vlan id or if zero then just return the first match. 2721 */ 2722 static struct bridge_rtnode * 2723 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2724 { 2725 struct bridge_rtnode *brt; 2726 uint32_t hash; 2727 int dir; 2728 2729 BRIDGE_LOCK_ASSERT(sc); 2730 2731 hash = bridge_rthash(sc, addr); 2732 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) { 2733 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr); 2734 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) 2735 return (brt); 2736 if (dir > 0) 2737 return (NULL); 2738 } 2739 2740 return (NULL); 2741 } 2742 2743 /* 2744 * bridge_rtnode_insert: 2745 * 2746 * Insert the specified bridge node into the route table. We 2747 * assume the entry is not already in the table. 2748 */ 2749 static int 2750 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt) 2751 { 2752 struct bridge_rtnode *lbrt; 2753 uint32_t hash; 2754 int dir; 2755 2756 BRIDGE_LOCK_ASSERT(sc); 2757 2758 hash = bridge_rthash(sc, brt->brt_addr); 2759 2760 lbrt = LIST_FIRST(&sc->sc_rthash[hash]); 2761 if (lbrt == NULL) { 2762 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash); 2763 goto out; 2764 } 2765 2766 do { 2767 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr); 2768 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) 2769 return (EEXIST); 2770 if (dir > 0) { 2771 LIST_INSERT_BEFORE(lbrt, brt, brt_hash); 2772 goto out; 2773 } 2774 if (LIST_NEXT(lbrt, brt_hash) == NULL) { 2775 LIST_INSERT_AFTER(lbrt, brt, brt_hash); 2776 goto out; 2777 } 2778 lbrt = LIST_NEXT(lbrt, brt_hash); 2779 } while (lbrt != NULL); 2780 2781 #ifdef DIAGNOSTIC 2782 panic("bridge_rtnode_insert: impossible"); 2783 #endif 2784 2785 out: 2786 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list); 2787 sc->sc_brtcnt++; 2788 2789 return (0); 2790 } 2791 2792 /* 2793 * bridge_rtnode_destroy: 2794 * 2795 * Destroy a bridge rtnode. 2796 */ 2797 static void 2798 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt) 2799 { 2800 BRIDGE_LOCK_ASSERT(sc); 2801 2802 LIST_REMOVE(brt, brt_hash); 2803 2804 LIST_REMOVE(brt, brt_list); 2805 sc->sc_brtcnt--; 2806 brt->brt_dst->bif_addrcnt--; 2807 uma_zfree(bridge_rtnode_zone, brt); 2808 } 2809 2810 /* 2811 * bridge_rtable_expire: 2812 * 2813 * Set the expiry time for all routes on an interface. 
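 *
 *	An age of zero flushes the dynamic entries for the interface instead
 *	of re-timing them.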
2814 */ 2815 static void 2816 bridge_rtable_expire(struct ifnet *ifp, int age) 2817 { 2818 struct bridge_softc *sc = ifp->if_bridge; 2819 struct bridge_rtnode *brt; 2820 2821 BRIDGE_LOCK(sc); 2822 2823 /* 2824 * If the age is zero then flush, otherwise set all the expiry times to 2825 * age for the interface 2826 */ 2827 if (age == 0) 2828 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN); 2829 else { 2830 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 2831 /* Cap the expiry time to 'age' */ 2832 if (brt->brt_ifp == ifp && 2833 brt->brt_expire > time_uptime + age && 2834 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2835 brt->brt_expire = time_uptime + age; 2836 } 2837 } 2838 BRIDGE_UNLOCK(sc); 2839 } 2840 2841 /* 2842 * bridge_state_change: 2843 * 2844 * Callback from the bridgestp code when a port changes states. 2845 */ 2846 static void 2847 bridge_state_change(struct ifnet *ifp, int state) 2848 { 2849 struct bridge_softc *sc = ifp->if_bridge; 2850 static const char *stpstates[] = { 2851 "disabled", 2852 "listening", 2853 "learning", 2854 "forwarding", 2855 "blocking", 2856 "discarding" 2857 }; 2858 2859 if (log_stp) 2860 log(LOG_NOTICE, "%s: state changed to %s on %s\n", 2861 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname); 2862 } 2863 2864 /* 2865 * Send bridge packets through pfil if they are one of the types pfil can deal 2866 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without 2867 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for 2868 * that interface. 2869 */ 2870 static int 2871 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) 2872 { 2873 int snap, error, i, hlen; 2874 struct ether_header *eh1, eh2; 2875 struct ip_fw_args args; 2876 struct ip *ip; 2877 struct llc llc1; 2878 u_int16_t ether_type; 2879 2880 snap = 0; 2881 error = -1; /* Default error if not error == 0 */ 2882 2883 #if 0 2884 /* we may return with the IP fields swapped, ensure its not shared */ 2885 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__)); 2886 #endif 2887 2888 if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) 2889 return (0); /* filtering is disabled */ 2890 2891 i = min((*mp)->m_pkthdr.len, max_protohdr); 2892 if ((*mp)->m_len < i) { 2893 *mp = m_pullup(*mp, i); 2894 if (*mp == NULL) { 2895 printf("%s: m_pullup failed\n", __func__); 2896 return (-1); 2897 } 2898 } 2899 2900 eh1 = mtod(*mp, struct ether_header *); 2901 ether_type = ntohs(eh1->ether_type); 2902 2903 /* 2904 * Check for SNAP/LLC. 2905 */ 2906 if (ether_type < ETHERMTU) { 2907 struct llc *llc2 = (struct llc *)(eh1 + 1); 2908 2909 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 && 2910 llc2->llc_dsap == LLC_SNAP_LSAP && 2911 llc2->llc_ssap == LLC_SNAP_LSAP && 2912 llc2->llc_control == LLC_UI) { 2913 ether_type = htons(llc2->llc_un.type_snap.ether_type); 2914 snap = 1; 2915 } 2916 } 2917 2918 /* 2919 * If we're trying to filter bridge traffic, don't look at anything 2920 * other than IP and ARP traffic. If the filter doesn't understand 2921 * IPv6, don't allow IPv6 through the bridge either. This is lame 2922 * since if we really wanted, say, an AppleTalk filter, we are hosed, 2923 * but of course we don't have an AppleTalk filter to begin with. 2924 * (Note that since pfil doesn't understand ARP it will pass *ALL* 2925 * ARP traffic.) 
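	 *
	 * ARP and REVARP frames are passed straight through unless
	 * pfil_ipfw_arp is set, in which case they are checked by ipfw
	 * (but never by the pfil(9) hooks).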
2926 */ 2927 switch (ether_type) { 2928 case ETHERTYPE_ARP: 2929 case ETHERTYPE_REVARP: 2930 if (pfil_ipfw_arp == 0) 2931 return (0); /* Automatically pass */ 2932 break; 2933 2934 case ETHERTYPE_IP: 2935 #ifdef INET6 2936 case ETHERTYPE_IPV6: 2937 #endif /* INET6 */ 2938 break; 2939 default: 2940 /* 2941 * Check to see if the user wants to pass non-ip 2942 * packets, these will not be checked by pfil(9) and 2943 * passed unconditionally so the default is to drop. 2944 */ 2945 if (pfil_onlyip) 2946 goto bad; 2947 } 2948 2949 /* Strip off the Ethernet header and keep a copy. */ 2950 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2); 2951 m_adj(*mp, ETHER_HDR_LEN); 2952 2953 /* Strip off snap header, if present */ 2954 if (snap) { 2955 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1); 2956 m_adj(*mp, sizeof(struct llc)); 2957 } 2958 2959 /* 2960 * Check the IP header for alignment and errors 2961 */ 2962 if (dir == PFIL_IN) { 2963 switch (ether_type) { 2964 case ETHERTYPE_IP: 2965 error = bridge_ip_checkbasic(mp); 2966 break; 2967 #ifdef INET6 2968 case ETHERTYPE_IPV6: 2969 error = bridge_ip6_checkbasic(mp); 2970 break; 2971 #endif /* INET6 */ 2972 default: 2973 error = 0; 2974 } 2975 if (error) 2976 goto bad; 2977 } 2978 2979 if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) { 2980 error = -1; 2981 args.rule = ip_dn_claim_rule(*mp); 2982 if (args.rule != NULL && fw_one_pass) 2983 goto ipfwpass; /* packet already partially processed */ 2984 2985 args.m = *mp; 2986 args.oif = ifp; 2987 args.next_hop = NULL; 2988 args.eh = &eh2; 2989 args.inp = NULL; /* used by ipfw uid/gid/jail rules */ 2990 i = ip_fw_chk_ptr(&args); 2991 *mp = args.m; 2992 2993 if (*mp == NULL) 2994 return (error); 2995 2996 if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) { 2997 2998 /* put the Ethernet header back on */ 2999 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3000 if (*mp == NULL) 3001 return (error); 3002 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3003 3004 /* 3005 * Pass the pkt to dummynet, which consumes it. The 3006 * packet will return to us via bridge_dummynet(). 3007 */ 3008 args.oif = ifp; 3009 ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args); 3010 return (error); 3011 } 3012 3013 if (i != IP_FW_PASS) /* drop */ 3014 goto bad; 3015 } 3016 3017 ipfwpass: 3018 error = 0; 3019 3020 /* 3021 * Run the packet through pfil 3022 */ 3023 switch (ether_type) { 3024 case ETHERTYPE_IP: 3025 /* 3026 * before calling the firewall, swap fields the same as 3027 * IP does. here we assume the header is contiguous 3028 */ 3029 ip = mtod(*mp, struct ip *); 3030 3031 ip->ip_len = ntohs(ip->ip_len); 3032 ip->ip_off = ntohs(ip->ip_off); 3033 3034 /* 3035 * Run pfil on the member interface and the bridge, both can 3036 * be skipped by clearing pfil_member or pfil_bridge. 
3037 * 3038 * Keep the order: 3039 * in_if -> bridge_if -> out_if 3040 */ 3041 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3042 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, 3043 dir, NULL); 3044 3045 if (*mp == NULL || error != 0) /* filter may consume */ 3046 break; 3047 3048 if (pfil_member && ifp != NULL) 3049 error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, 3050 dir, NULL); 3051 3052 if (*mp == NULL || error != 0) /* filter may consume */ 3053 break; 3054 3055 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3056 error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, 3057 dir, NULL); 3058 3059 if (*mp == NULL || error != 0) /* filter may consume */ 3060 break; 3061 3062 /* check if we need to fragment the packet */ 3063 if (pfil_member && ifp != NULL && dir == PFIL_OUT) { 3064 i = (*mp)->m_pkthdr.len; 3065 if (i > ifp->if_mtu) { 3066 error = bridge_fragment(ifp, *mp, &eh2, snap, 3067 &llc1); 3068 return (error); 3069 } 3070 } 3071 3072 /* Recalculate the ip checksum and restore byte ordering */ 3073 ip = mtod(*mp, struct ip *); 3074 hlen = ip->ip_hl << 2; 3075 if (hlen < sizeof(struct ip)) 3076 goto bad; 3077 if (hlen > (*mp)->m_len) { 3078 if ((*mp = m_pullup(*mp, hlen)) == 0) 3079 goto bad; 3080 ip = mtod(*mp, struct ip *); 3081 if (ip == NULL) 3082 goto bad; 3083 } 3084 ip->ip_len = htons(ip->ip_len); 3085 ip->ip_off = htons(ip->ip_off); 3086 ip->ip_sum = 0; 3087 if (hlen == sizeof(struct ip)) 3088 ip->ip_sum = in_cksum_hdr(ip); 3089 else 3090 ip->ip_sum = in_cksum(*mp, hlen); 3091 3092 break; 3093 #ifdef INET6 3094 case ETHERTYPE_IPV6: 3095 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3096 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp, 3097 dir, NULL); 3098 3099 if (*mp == NULL || error != 0) /* filter may consume */ 3100 break; 3101 3102 if (pfil_member && ifp != NULL) 3103 error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp, 3104 dir, NULL); 3105 3106 if (*mp == NULL || error != 0) /* filter may consume */ 3107 break; 3108 3109 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3110 error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp, 3111 dir, NULL); 3112 break; 3113 #endif 3114 default: 3115 error = 0; 3116 break; 3117 } 3118 3119 if (*mp == NULL) 3120 return (error); 3121 if (error != 0) 3122 goto bad; 3123 3124 error = -1; 3125 3126 /* 3127 * Finally, put everything back the way it was and return 3128 */ 3129 if (snap) { 3130 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT); 3131 if (*mp == NULL) 3132 return (error); 3133 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc)); 3134 } 3135 3136 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3137 if (*mp == NULL) 3138 return (error); 3139 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3140 3141 return (0); 3142 3143 bad: 3144 m_freem(*mp); 3145 *mp = NULL; 3146 return (error); 3147 } 3148 3149 /* 3150 * Perform basic checks on header size since 3151 * pfil assumes ip_input has already processed 3152 * it for it. Cut-and-pasted from ip_input.c. 3153 * Given how simple the IPv6 version is, 3154 * does the IPv4 version really need to be 3155 * this complicated? 3156 * 3157 * XXX Should we update ipstat here, or not? 3158 * XXX Right now we update ipstat but not 3159 * XXX csum_counter. 
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (*mp == NULL)
		return (-1);

	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			ipstat.ips_toosmall++;
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			ipstat.ips_toosmall++;
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL)
		goto bad;

	if (ip->ip_v != IPVERSION) {
		ipstat.ips_badvers++;
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		ipstat.ips_badhlen++;
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			ipstat.ips_badhlen++;
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL)
			goto bad;
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		ipstat.ips_badsum++;
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		ipstat.ips_badlen++;
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ipstat.ips_tooshort++;
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}

#ifdef INET6
/*
 * Same as above, but for IPv6.
 * Cut-and-pasted from ip6_input.c.
 * XXX Should we update ip6stat, or not?
 */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
3267 */ 3268 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3269 struct ifnet *inifp = m->m_pkthdr.rcvif; 3270 if ((m = m_copyup(m, sizeof(struct ip6_hdr), 3271 (max_linkhdr + 3) & ~3)) == NULL) { 3272 /* XXXJRT new stat, please */ 3273 ip6stat.ip6s_toosmall++; 3274 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3275 goto bad; 3276 } 3277 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) { 3278 struct ifnet *inifp = m->m_pkthdr.rcvif; 3279 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { 3280 ip6stat.ip6s_toosmall++; 3281 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3282 goto bad; 3283 } 3284 } 3285 3286 ip6 = mtod(m, struct ip6_hdr *); 3287 3288 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { 3289 ip6stat.ip6s_badvers++; 3290 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); 3291 goto bad; 3292 } 3293 3294 /* Checks out, proceed */ 3295 *mp = m; 3296 return (0); 3297 3298 bad: 3299 *mp = m; 3300 return (-1); 3301 } 3302 #endif /* INET6 */ 3303 3304 /* 3305 * bridge_fragment: 3306 * 3307 * Return a fragmented mbuf chain. 3308 */ 3309 static int 3310 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh, 3311 int snap, struct llc *llc) 3312 { 3313 struct mbuf *m0; 3314 struct ip *ip; 3315 int error = -1; 3316 3317 if (m->m_len < sizeof(struct ip) && 3318 (m = m_pullup(m, sizeof(struct ip))) == NULL) 3319 goto out; 3320 ip = mtod(m, struct ip *); 3321 3322 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist, 3323 CSUM_DELAY_IP); 3324 if (error) 3325 goto out; 3326 3327 /* walk the chain and re-add the Ethernet header */ 3328 for (m0 = m; m0; m0 = m0->m_nextpkt) { 3329 if (error == 0) { 3330 if (snap) { 3331 M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT); 3332 if (m0 == NULL) { 3333 error = ENOBUFS; 3334 continue; 3335 } 3336 bcopy(llc, mtod(m0, caddr_t), 3337 sizeof(struct llc)); 3338 } 3339 M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT); 3340 if (m0 == NULL) { 3341 error = ENOBUFS; 3342 continue; 3343 } 3344 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN); 3345 } else 3346 m_freem(m); 3347 } 3348 3349 if (error == 0) 3350 ipstat.ips_fragmented++; 3351 3352 return (error); 3353 3354 out: 3355 if (m != NULL) 3356 m_freem(m); 3357 return (error); 3358 } 3359