1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net) 40 * All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 53 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 54 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 55 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 56 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 57 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 59 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 60 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 61 * POSSIBILITY OF SUCH DAMAGE. 62 * 63 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp 64 */ 65 66 /* 67 * Network interface bridge support. 68 * 69 * TODO: 70 * 71 * - Currently only supports Ethernet-like interfaces (Ethernet, 72 * 802.11, VLANs on Ethernet, etc.) 
Figure out a nice way 73 * to bridge other types of interfaces (FDDI-FDDI, and maybe 74 * consider heterogenous bridges). 75 */ 76 77 #include <sys/cdefs.h> 78 __FBSDID("$FreeBSD$"); 79 80 #include "opt_inet.h" 81 #include "opt_inet6.h" 82 #include "opt_carp.h" 83 84 #include <sys/param.h> 85 #include <sys/mbuf.h> 86 #include <sys/malloc.h> 87 #include <sys/protosw.h> 88 #include <sys/systm.h> 89 #include <sys/time.h> 90 #include <sys/socket.h> /* for net/if.h */ 91 #include <sys/sockio.h> 92 #include <sys/ctype.h> /* string functions */ 93 #include <sys/kernel.h> 94 #include <sys/random.h> 95 #include <sys/syslog.h> 96 #include <sys/sysctl.h> 97 #include <vm/uma.h> 98 #include <sys/module.h> 99 #include <sys/priv.h> 100 #include <sys/proc.h> 101 #include <sys/lock.h> 102 #include <sys/mutex.h> 103 #include <sys/rwlock.h> 104 105 #include <net/bpf.h> 106 #include <net/if.h> 107 #include <net/if_clone.h> 108 #include <net/if_dl.h> 109 #include <net/if_types.h> 110 #include <net/if_var.h> 111 #include <net/pfil.h> 112 113 #include <netinet/in.h> /* for struct arpcom */ 114 #include <netinet/in_systm.h> 115 #include <netinet/in_var.h> 116 #include <netinet/ip.h> 117 #include <netinet/ip_var.h> 118 #ifdef INET6 119 #include <netinet/ip6.h> 120 #include <netinet6/ip6_var.h> 121 #endif 122 #if defined(INET) || defined(INET6) 123 #ifdef DEV_CARP 124 #include <netinet/ip_carp.h> 125 #endif 126 #endif 127 #include <machine/in_cksum.h> 128 #include <netinet/if_ether.h> /* for struct arpcom */ 129 #include <net/bridgestp.h> 130 #include <net/if_bridgevar.h> 131 #include <net/if_llc.h> 132 #include <net/if_vlan_var.h> 133 134 #include <net/route.h> 135 #include <netinet/ip_fw.h> 136 #include <netinet/ip_dummynet.h> 137 138 /* 139 * Size of the route hash table. Must be a power of two. 140 */ 141 #ifndef BRIDGE_RTHASH_SIZE 142 #define BRIDGE_RTHASH_SIZE 1024 143 #endif 144 145 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1) 146 147 /* 148 * Maximum number of addresses to cache. 149 */ 150 #ifndef BRIDGE_RTABLE_MAX 151 #define BRIDGE_RTABLE_MAX 100 152 #endif 153 154 /* 155 * Timeout (in seconds) for entries learned dynamically. 156 */ 157 #ifndef BRIDGE_RTABLE_TIMEOUT 158 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */ 159 #endif 160 161 /* 162 * Number of seconds between walks of the route list. 163 */ 164 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD 165 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60) 166 #endif 167 168 /* 169 * List of capabilities to possibly mask on the member interface. 170 */ 171 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM) 172 173 /* 174 * Bridge interface list entry. 175 */ 176 struct bridge_iflist { 177 LIST_ENTRY(bridge_iflist) bif_next; 178 struct ifnet *bif_ifp; /* member if */ 179 struct bstp_port bif_stp; /* STP state */ 180 uint32_t bif_flags; /* member if flags */ 181 int bif_savedcaps; /* saved capabilities */ 182 uint32_t bif_addrmax; /* max # of addresses */ 183 uint32_t bif_addrcnt; /* cur. # of addresses */ 184 uint32_t bif_addrexceeded;/* # of address violations */ 185 }; 186 187 /* 188 * Bridge route node. 
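 *
 * Each node caches one learned (MAC address, VLAN) pair together with the
 * member interface the address was last seen on.  As an illustrative sketch
 * only (addr, vlan and hash are hypothetical locals; the real code is
 * bridge_rtnode_lookup() below, which derives the bucket index from the
 * address and sc_rthash_key), a lookup amounts to walking one hash bucket:
 *
 *	struct bridge_rtnode *brt;
 *
 *	LIST_FOREACH(brt, &sc->sc_rthash[hash & BRIDGE_RTHASH_MASK], brt_hash) {
 *		if (brt->brt_vlan == vlan &&
 *		    bridge_rtnode_addr_cmp(addr, brt->brt_addr) == 0)
 *			return (brt);
 *	}
 *	return (NULL);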
189 */ 190 struct bridge_rtnode { 191 LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */ 192 LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */ 193 struct bridge_iflist *brt_dst; /* destination if */ 194 unsigned long brt_expire; /* expiration time */ 195 uint8_t brt_flags; /* address flags */ 196 uint8_t brt_addr[ETHER_ADDR_LEN]; 197 uint16_t brt_vlan; /* vlan id */ 198 }; 199 #define brt_ifp brt_dst->bif_ifp 200 201 /* 202 * Software state for each bridge. 203 */ 204 struct bridge_softc { 205 struct ifnet *sc_ifp; /* make this an interface */ 206 LIST_ENTRY(bridge_softc) sc_list; 207 struct mtx sc_mtx; 208 struct cv sc_cv; 209 uint32_t sc_brtmax; /* max # of addresses */ 210 uint32_t sc_brtcnt; /* cur. # of addresses */ 211 uint32_t sc_brttimeout; /* rt timeout in seconds */ 212 struct callout sc_brcallout; /* bridge callout */ 213 uint32_t sc_iflist_ref; /* refcount for sc_iflist */ 214 uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */ 215 LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */ 216 LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */ 217 LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */ 218 uint32_t sc_rthash_key; /* key for hash */ 219 LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */ 220 struct bstp_state sc_stp; /* STP state */ 221 uint32_t sc_brtexceeded; /* # of cache drops */ 222 struct ifnet *sc_ifaddr; /* member mac copied from */ 223 u_char sc_defaddr[6]; /* Default MAC address */ 224 }; 225 226 static struct mtx bridge_list_mtx; 227 eventhandler_tag bridge_detach_cookie = NULL; 228 229 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; 230 231 uma_zone_t bridge_rtnode_zone; 232 233 static int bridge_clone_create(struct if_clone *, int, caddr_t); 234 static void bridge_clone_destroy(struct ifnet *); 235 236 static int bridge_ioctl(struct ifnet *, u_long, caddr_t); 237 static void bridge_mutecaps(struct bridge_softc *); 238 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *, 239 int); 240 static void bridge_ifdetach(void *arg __unused, struct ifnet *); 241 static void bridge_init(void *); 242 static void bridge_dummynet(struct mbuf *, struct ifnet *); 243 static void bridge_stop(struct ifnet *, int); 244 static void bridge_start(struct ifnet *); 245 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *); 246 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *, 247 struct rtentry *); 248 static void bridge_enqueue(struct bridge_softc *, struct ifnet *, 249 struct mbuf *); 250 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int); 251 252 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *, 253 struct mbuf *m); 254 255 static void bridge_timer(void *); 256 257 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, 258 struct mbuf *, int); 259 static void bridge_span(struct bridge_softc *, struct mbuf *); 260 261 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, 262 uint16_t, struct bridge_iflist *, int, uint8_t); 263 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *, 264 uint16_t); 265 static void bridge_rttrim(struct bridge_softc *); 266 static void bridge_rtage(struct bridge_softc *); 267 static void bridge_rtflush(struct bridge_softc *, int); 268 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *, 269 uint16_t); 270 271 static int bridge_rtable_init(struct bridge_softc *); 272 static void bridge_rtable_fini(struct 
bridge_softc *); 273 274 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *); 275 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *, 276 const uint8_t *, uint16_t); 277 static int bridge_rtnode_insert(struct bridge_softc *, 278 struct bridge_rtnode *); 279 static void bridge_rtnode_destroy(struct bridge_softc *, 280 struct bridge_rtnode *); 281 static void bridge_rtable_expire(struct ifnet *, int); 282 static void bridge_state_change(struct ifnet *, int); 283 284 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *, 285 const char *name); 286 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *, 287 struct ifnet *ifp); 288 static void bridge_delete_member(struct bridge_softc *, 289 struct bridge_iflist *, int); 290 static void bridge_delete_span(struct bridge_softc *, 291 struct bridge_iflist *); 292 293 static int bridge_ioctl_add(struct bridge_softc *, void *); 294 static int bridge_ioctl_del(struct bridge_softc *, void *); 295 static int bridge_ioctl_gifflags(struct bridge_softc *, void *); 296 static int bridge_ioctl_sifflags(struct bridge_softc *, void *); 297 static int bridge_ioctl_scache(struct bridge_softc *, void *); 298 static int bridge_ioctl_gcache(struct bridge_softc *, void *); 299 static int bridge_ioctl_gifs(struct bridge_softc *, void *); 300 static int bridge_ioctl_rts(struct bridge_softc *, void *); 301 static int bridge_ioctl_saddr(struct bridge_softc *, void *); 302 static int bridge_ioctl_sto(struct bridge_softc *, void *); 303 static int bridge_ioctl_gto(struct bridge_softc *, void *); 304 static int bridge_ioctl_daddr(struct bridge_softc *, void *); 305 static int bridge_ioctl_flush(struct bridge_softc *, void *); 306 static int bridge_ioctl_gpri(struct bridge_softc *, void *); 307 static int bridge_ioctl_spri(struct bridge_softc *, void *); 308 static int bridge_ioctl_ght(struct bridge_softc *, void *); 309 static int bridge_ioctl_sht(struct bridge_softc *, void *); 310 static int bridge_ioctl_gfd(struct bridge_softc *, void *); 311 static int bridge_ioctl_sfd(struct bridge_softc *, void *); 312 static int bridge_ioctl_gma(struct bridge_softc *, void *); 313 static int bridge_ioctl_sma(struct bridge_softc *, void *); 314 static int bridge_ioctl_sifprio(struct bridge_softc *, void *); 315 static int bridge_ioctl_sifcost(struct bridge_softc *, void *); 316 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *); 317 static int bridge_ioctl_addspan(struct bridge_softc *, void *); 318 static int bridge_ioctl_delspan(struct bridge_softc *, void *); 319 static int bridge_ioctl_gbparam(struct bridge_softc *, void *); 320 static int bridge_ioctl_grte(struct bridge_softc *, void *); 321 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *); 322 static int bridge_ioctl_sproto(struct bridge_softc *, void *); 323 static int bridge_ioctl_stxhc(struct bridge_softc *, void *); 324 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, 325 int); 326 static int bridge_ip_checkbasic(struct mbuf **mp); 327 #ifdef INET6 328 static int bridge_ip6_checkbasic(struct mbuf **mp); 329 #endif /* INET6 */ 330 static int bridge_fragment(struct ifnet *, struct mbuf *, 331 struct ether_header *, int, struct llc *); 332 333 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */ 334 #define VLANTAGOF(_m) \ 335 (_m->m_flags & M_VLANTAG) ? 
EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1 336 337 static struct bstp_cb_ops bridge_ops = { 338 .bcb_state = bridge_state_change, 339 .bcb_rtage = bridge_rtable_expire 340 }; 341 342 SYSCTL_DECL(_net_link); 343 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge"); 344 345 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */ 346 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */ 347 static int pfil_member = 1; /* run pfil hooks on the member interface */ 348 static int pfil_ipfw = 0; /* layer2 filter with ipfw */ 349 static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */ 350 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for 351 locally destined packets */ 352 static int log_stp = 0; /* log STP state changes */ 353 static int bridge_inherit_mac = 0; /* share MAC with first bridge member */ 354 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW, 355 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled"); 356 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW, 357 &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2"); 358 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW, 359 &pfil_bridge, 0, "Packet filter on the bridge interface"); 360 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW, 361 &pfil_member, 0, "Packet filter on the member interface"); 362 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW, 363 &pfil_local_phys, 0, 364 "Packet filter on the physical interface for locally destined packets"); 365 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW, 366 &log_stp, 0, "Log STP state changes"); 367 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, CTLFLAG_RW, 368 &bridge_inherit_mac, 0, 369 "Inherit MAC address from the first bridge member"); 370 371 struct bridge_control { 372 int (*bc_func)(struct bridge_softc *, void *); 373 int bc_argsize; 374 int bc_flags; 375 }; 376 377 #define BC_F_COPYIN 0x01 /* copy arguments in */ 378 #define BC_F_COPYOUT 0x02 /* copy arguments out */ 379 #define BC_F_SUSER 0x04 /* do super-user check */ 380 381 const struct bridge_control bridge_control_table[] = { 382 { bridge_ioctl_add, sizeof(struct ifbreq), 383 BC_F_COPYIN|BC_F_SUSER }, 384 { bridge_ioctl_del, sizeof(struct ifbreq), 385 BC_F_COPYIN|BC_F_SUSER }, 386 387 { bridge_ioctl_gifflags, sizeof(struct ifbreq), 388 BC_F_COPYIN|BC_F_COPYOUT }, 389 { bridge_ioctl_sifflags, sizeof(struct ifbreq), 390 BC_F_COPYIN|BC_F_SUSER }, 391 392 { bridge_ioctl_scache, sizeof(struct ifbrparam), 393 BC_F_COPYIN|BC_F_SUSER }, 394 { bridge_ioctl_gcache, sizeof(struct ifbrparam), 395 BC_F_COPYOUT }, 396 397 { bridge_ioctl_gifs, sizeof(struct ifbifconf), 398 BC_F_COPYIN|BC_F_COPYOUT }, 399 { bridge_ioctl_rts, sizeof(struct ifbaconf), 400 BC_F_COPYIN|BC_F_COPYOUT }, 401 402 { bridge_ioctl_saddr, sizeof(struct ifbareq), 403 BC_F_COPYIN|BC_F_SUSER }, 404 405 { bridge_ioctl_sto, sizeof(struct ifbrparam), 406 BC_F_COPYIN|BC_F_SUSER }, 407 { bridge_ioctl_gto, sizeof(struct ifbrparam), 408 BC_F_COPYOUT }, 409 410 { bridge_ioctl_daddr, sizeof(struct ifbareq), 411 BC_F_COPYIN|BC_F_SUSER }, 412 413 { bridge_ioctl_flush, sizeof(struct ifbreq), 414 BC_F_COPYIN|BC_F_SUSER }, 415 416 { bridge_ioctl_gpri, sizeof(struct ifbrparam), 417 BC_F_COPYOUT }, 418 { bridge_ioctl_spri, sizeof(struct ifbrparam), 419 BC_F_COPYIN|BC_F_SUSER }, 420 421 { bridge_ioctl_ght, sizeof(struct ifbrparam), 422 BC_F_COPYOUT }, 423 { bridge_ioctl_sht, sizeof(struct ifbrparam), 
424 BC_F_COPYIN|BC_F_SUSER }, 425 426 { bridge_ioctl_gfd, sizeof(struct ifbrparam), 427 BC_F_COPYOUT }, 428 { bridge_ioctl_sfd, sizeof(struct ifbrparam), 429 BC_F_COPYIN|BC_F_SUSER }, 430 431 { bridge_ioctl_gma, sizeof(struct ifbrparam), 432 BC_F_COPYOUT }, 433 { bridge_ioctl_sma, sizeof(struct ifbrparam), 434 BC_F_COPYIN|BC_F_SUSER }, 435 436 { bridge_ioctl_sifprio, sizeof(struct ifbreq), 437 BC_F_COPYIN|BC_F_SUSER }, 438 439 { bridge_ioctl_sifcost, sizeof(struct ifbreq), 440 BC_F_COPYIN|BC_F_SUSER }, 441 442 { bridge_ioctl_addspan, sizeof(struct ifbreq), 443 BC_F_COPYIN|BC_F_SUSER }, 444 { bridge_ioctl_delspan, sizeof(struct ifbreq), 445 BC_F_COPYIN|BC_F_SUSER }, 446 447 { bridge_ioctl_gbparam, sizeof(struct ifbropreq), 448 BC_F_COPYOUT }, 449 450 { bridge_ioctl_grte, sizeof(struct ifbrparam), 451 BC_F_COPYOUT }, 452 453 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf), 454 BC_F_COPYIN|BC_F_COPYOUT }, 455 456 { bridge_ioctl_sproto, sizeof(struct ifbrparam), 457 BC_F_COPYIN|BC_F_SUSER }, 458 459 { bridge_ioctl_stxhc, sizeof(struct ifbrparam), 460 BC_F_COPYIN|BC_F_SUSER }, 461 462 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq), 463 BC_F_COPYIN|BC_F_SUSER }, 464 465 }; 466 const int bridge_control_table_size = 467 sizeof(bridge_control_table) / sizeof(bridge_control_table[0]); 468 469 LIST_HEAD(, bridge_softc) bridge_list; 470 471 IFC_SIMPLE_DECLARE(bridge, 0); 472 473 static int 474 bridge_modevent(module_t mod, int type, void *data) 475 { 476 477 switch (type) { 478 case MOD_LOAD: 479 mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF); 480 if_clone_attach(&bridge_cloner); 481 bridge_rtnode_zone = uma_zcreate("bridge_rtnode", 482 sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL, 483 UMA_ALIGN_PTR, 0); 484 LIST_INIT(&bridge_list); 485 bridge_input_p = bridge_input; 486 bridge_output_p = bridge_output; 487 bridge_dn_p = bridge_dummynet; 488 bridge_detach_cookie = EVENTHANDLER_REGISTER( 489 ifnet_departure_event, bridge_ifdetach, NULL, 490 EVENTHANDLER_PRI_ANY); 491 break; 492 case MOD_UNLOAD: 493 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 494 bridge_detach_cookie); 495 if_clone_detach(&bridge_cloner); 496 uma_zdestroy(bridge_rtnode_zone); 497 bridge_input_p = NULL; 498 bridge_output_p = NULL; 499 bridge_dn_p = NULL; 500 mtx_destroy(&bridge_list_mtx); 501 break; 502 default: 503 return (EOPNOTSUPP); 504 } 505 return (0); 506 } 507 508 static moduledata_t bridge_mod = { 509 "if_bridge", 510 bridge_modevent, 511 0 512 }; 513 514 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 515 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1); 516 517 /* 518 * handler for net.link.bridge.pfil_ipfw 519 */ 520 static int 521 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS) 522 { 523 int enable = pfil_ipfw; 524 int error; 525 526 error = sysctl_handle_int(oidp, &enable, 0, req); 527 enable = (enable) ? 1 : 0; 528 529 if (enable != pfil_ipfw) { 530 pfil_ipfw = enable; 531 532 /* 533 * Disable pfil so that ipfw doesnt run twice, if the user 534 * really wants both then they can re-enable pfil_bridge and/or 535 * pfil_member. Also allow non-ip packets as ipfw can filter by 536 * layer2 type. 537 */ 538 if (pfil_ipfw) { 539 pfil_onlyip = 0; 540 pfil_bridge = 0; 541 pfil_member = 0; 542 } 543 } 544 545 return (error); 546 } 547 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW, 548 &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW"); 549 550 /* 551 * bridge_clone_create: 552 * 553 * Create a new bridge instance. 
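 *
 * Instances are created through the if_clone framework registered in
 * bridge_modevent() above, typically via ``ifconfig bridge0 create''.
 * Illustrative userland sketch only (includes and further error handling
 * omitted; "bridge0" is an example name):
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "bridge0", sizeof(ifr.ifr_name));
 *	if (ioctl(s, SIOCIFCREATE, &ifr) == -1)
 *		err(1, "SIOCIFCREATE");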
554 */ 555 static int 556 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params) 557 { 558 struct bridge_softc *sc, *sc2; 559 struct ifnet *bifp, *ifp; 560 int retry; 561 562 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO); 563 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 564 if (ifp == NULL) { 565 free(sc, M_DEVBUF); 566 return (ENOSPC); 567 } 568 569 BRIDGE_LOCK_INIT(sc); 570 sc->sc_brtmax = BRIDGE_RTABLE_MAX; 571 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT; 572 573 /* Initialize our routing table. */ 574 bridge_rtable_init(sc); 575 576 callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0); 577 578 LIST_INIT(&sc->sc_iflist); 579 LIST_INIT(&sc->sc_spanlist); 580 581 ifp->if_softc = sc; 582 if_initname(ifp, ifc->ifc_name, unit); 583 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 584 ifp->if_ioctl = bridge_ioctl; 585 ifp->if_start = bridge_start; 586 ifp->if_init = bridge_init; 587 ifp->if_type = IFT_BRIDGE; 588 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 589 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 590 IFQ_SET_READY(&ifp->if_snd); 591 592 /* 593 * Generate a random ethernet address with a locally administered 594 * address. 595 * 596 * Since we are using random ethernet addresses for the bridge, it is 597 * possible that we might have address collisions, so make sure that 598 * this hardware address isn't already in use on another bridge. 599 */ 600 for (retry = 1; retry != 0;) { 601 arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1); 602 sc->sc_defaddr[0] &= ~1; /* clear multicast bit */ 603 sc->sc_defaddr[0] |= 2; /* set the LAA bit */ 604 retry = 0; 605 mtx_lock(&bridge_list_mtx); 606 LIST_FOREACH(sc2, &bridge_list, sc_list) { 607 bifp = sc2->sc_ifp; 608 if (memcmp(sc->sc_defaddr, 609 IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) 610 retry = 1; 611 } 612 mtx_unlock(&bridge_list_mtx); 613 } 614 615 bstp_attach(&sc->sc_stp, &bridge_ops); 616 ether_ifattach(ifp, sc->sc_defaddr); 617 /* Now undo some of the damage... */ 618 ifp->if_baudrate = 0; 619 ifp->if_type = IFT_BRIDGE; 620 621 mtx_lock(&bridge_list_mtx); 622 LIST_INSERT_HEAD(&bridge_list, sc, sc_list); 623 mtx_unlock(&bridge_list_mtx); 624 625 return (0); 626 } 627 628 /* 629 * bridge_clone_destroy: 630 * 631 * Destroy a bridge instance. 632 */ 633 static void 634 bridge_clone_destroy(struct ifnet *ifp) 635 { 636 struct bridge_softc *sc = ifp->if_softc; 637 struct bridge_iflist *bif; 638 639 BRIDGE_LOCK(sc); 640 641 bridge_stop(ifp, 1); 642 ifp->if_flags &= ~IFF_UP; 643 644 while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL) 645 bridge_delete_member(sc, bif, 0); 646 647 while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) { 648 bridge_delete_span(sc, bif); 649 } 650 651 BRIDGE_UNLOCK(sc); 652 653 callout_drain(&sc->sc_brcallout); 654 655 mtx_lock(&bridge_list_mtx); 656 LIST_REMOVE(sc, sc_list); 657 mtx_unlock(&bridge_list_mtx); 658 659 bstp_detach(&sc->sc_stp); 660 ether_ifdetach(ifp); 661 if_free_type(ifp, IFT_ETHER); 662 663 /* Tear down the routing table. */ 664 bridge_rtable_fini(sc); 665 666 BRIDGE_LOCK_DESTROY(sc); 667 free(sc, M_DEVBUF); 668 } 669 670 /* 671 * bridge_ioctl: 672 * 673 * Handle a control request from the operator. 
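 *
 * The SIOCGDRVSPEC/SIOCSDRVSPEC requests carry a struct ifdrv whose
 * ifd_cmd field indexes bridge_control_table[] above.  Illustrative
 * userland sketch only (s is an AF_INET datagram socket, error handling
 * omitted); the command constants such as BRDGGCACHE come from
 * net/if_bridgevar.h:
 *
 *	struct ifdrv ifd;
 *	struct ifbrparam param;
 *
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = BRDGGCACHE;
 *	ifd.ifd_len = sizeof(param);
 *	ifd.ifd_data = &param;
 *	if (ioctl(s, SIOCGDRVSPEC, &ifd) == 0)
 *		printf("address cache size: %u\n", param.ifbrp_csize);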
674 */ 675 static int 676 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 677 { 678 struct bridge_softc *sc = ifp->if_softc; 679 struct thread *td = curthread; 680 union { 681 struct ifbreq ifbreq; 682 struct ifbifconf ifbifconf; 683 struct ifbareq ifbareq; 684 struct ifbaconf ifbaconf; 685 struct ifbrparam ifbrparam; 686 struct ifbropreq ifbropreq; 687 } args; 688 struct ifdrv *ifd = (struct ifdrv *) data; 689 const struct bridge_control *bc; 690 int error = 0; 691 692 switch (cmd) { 693 694 case SIOCADDMULTI: 695 case SIOCDELMULTI: 696 break; 697 698 case SIOCGDRVSPEC: 699 case SIOCSDRVSPEC: 700 if (ifd->ifd_cmd >= bridge_control_table_size) { 701 error = EINVAL; 702 break; 703 } 704 bc = &bridge_control_table[ifd->ifd_cmd]; 705 706 if (cmd == SIOCGDRVSPEC && 707 (bc->bc_flags & BC_F_COPYOUT) == 0) { 708 error = EINVAL; 709 break; 710 } 711 else if (cmd == SIOCSDRVSPEC && 712 (bc->bc_flags & BC_F_COPYOUT) != 0) { 713 error = EINVAL; 714 break; 715 } 716 717 if (bc->bc_flags & BC_F_SUSER) { 718 error = priv_check(td, PRIV_NET_BRIDGE); 719 if (error) 720 break; 721 } 722 723 if (ifd->ifd_len != bc->bc_argsize || 724 ifd->ifd_len > sizeof(args)) { 725 error = EINVAL; 726 break; 727 } 728 729 bzero(&args, sizeof(args)); 730 if (bc->bc_flags & BC_F_COPYIN) { 731 error = copyin(ifd->ifd_data, &args, ifd->ifd_len); 732 if (error) 733 break; 734 } 735 736 BRIDGE_LOCK(sc); 737 error = (*bc->bc_func)(sc, &args); 738 BRIDGE_UNLOCK(sc); 739 if (error) 740 break; 741 742 if (bc->bc_flags & BC_F_COPYOUT) 743 error = copyout(&args, ifd->ifd_data, ifd->ifd_len); 744 745 break; 746 747 case SIOCSIFFLAGS: 748 if (!(ifp->if_flags & IFF_UP) && 749 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 750 /* 751 * If interface is marked down and it is running, 752 * then stop and disable it. 753 */ 754 BRIDGE_LOCK(sc); 755 bridge_stop(ifp, 1); 756 BRIDGE_UNLOCK(sc); 757 } else if ((ifp->if_flags & IFF_UP) && 758 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 759 /* 760 * If interface is marked up and it is stopped, then 761 * start it. 762 */ 763 (*ifp->if_init)(sc); 764 } 765 break; 766 767 case SIOCSIFMTU: 768 /* Do not allow the MTU to be changed on the bridge */ 769 error = EINVAL; 770 break; 771 772 default: 773 /* 774 * drop the lock as ether_ioctl() will call bridge_start() and 775 * cause the lock to be recursed. 776 */ 777 error = ether_ioctl(ifp, cmd, data); 778 break; 779 } 780 781 return (error); 782 } 783 784 /* 785 * bridge_mutecaps: 786 * 787 * Clear or restore unwanted capabilities on the member interface 788 */ 789 static void 790 bridge_mutecaps(struct bridge_softc *sc) 791 { 792 struct bridge_iflist *bif; 793 int enabled, mask; 794 795 /* Initial bitmask of capabilities to test */ 796 mask = BRIDGE_IFCAPS_MASK; 797 798 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 799 /* Every member must support it or its disabled */ 800 mask &= bif->bif_savedcaps; 801 } 802 803 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 804 enabled = bif->bif_ifp->if_capenable; 805 /* strip off mask bits and enable them again if allowed */ 806 enabled &= ~BRIDGE_IFCAPS_MASK; 807 enabled |= mask; 808 /* 809 * Receive offload can only be enabled if all members also 810 * support send offload. 
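		 *
		 * Worked example (illustrative values only): if one member
		 * saved IFCAP_TXCSUM|IFCAP_TSO and another saved only
		 * IFCAP_TXCSUM, the intersection above leaves mask ==
		 * IFCAP_TXCSUM, so TSO is cleared on every member; and
		 * because TSO ends up disabled, the check below strips
		 * IFCAP_LRO as well.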
		 */
		if ((enabled & IFCAP_TSO) == 0)
			enabled &= ~IFCAP_LRO;

		bridge_set_ifcap(sc, bif, enabled);
	}

}

static void
bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
{
	struct ifnet *ifp = bif->bif_ifp;
	struct ifreq ifr;
	int error;

	bzero(&ifr, sizeof(ifr));
	ifr.ifr_reqcap = set;

	if (ifp->if_capenable != set) {
		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
		if (error)
			if_printf(sc->sc_ifp,
			    "error setting interface capabilities on %s\n",
			    ifp->if_xname);
	}
}

/*
 * bridge_lookup_member:
 *
 *	Lookup a bridge member interface.
 */
static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
{
	struct bridge_iflist *bif;
	struct ifnet *ifp;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifp = bif->bif_ifp;
		if (strcmp(ifp->if_xname, name) == 0)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_lookup_member_if:
 *
 *	Lookup a bridge member interface by ifnet*.
 */
static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
{
	struct bridge_iflist *bif;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_ifp == member_ifp)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_delete_member:
 *
 *	Delete the specified member interface.
 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
    int gone)
{
	struct ifnet *ifs = bif->bif_ifp;
	struct ifnet *fif = NULL;

	BRIDGE_LOCK_ASSERT(sc);

	if (bif->bif_flags & IFBIF_STP)
		bstp_disable(&bif->bif_stp);

	ifs->if_bridge = NULL;
	BRIDGE_XLOCK(sc);
	LIST_REMOVE(bif, bif_next);
	BRIDGE_XDROP(sc);

	/*
	 * If removing the interface that gave the bridge its mac address, set
	 * the mac address of the bridge to the address of the next member, or
	 * to its default address if no members are left.
	 */
	if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
		if (LIST_EMPTY(&sc->sc_iflist)) {
			bcopy(sc->sc_defaddr,
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = NULL;
		} else {
			fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp;
			bcopy(IF_LLADDR(fif),
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = fif;
		}
	}

	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
	KASSERT(bif->bif_addrcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

	BRIDGE_UNLOCK(sc);
	if (!gone) {
		switch (ifs->if_type) {
		case IFT_ETHER:
		case IFT_L2VLAN:
			/*
			 * Take the interface out of promiscuous mode.
			 */
			(void) ifpromisc(ifs, 0);
			break;

		case IFT_GIF:
			break;

		default:
#ifdef DIAGNOSTIC
			panic("bridge_delete_member: impossible");
#endif
			break;
		}
		/* re-enable any interface capabilities */
		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
	}
	bstp_destroy(&bif->bif_stp);	/* prepare to free */
	BRIDGE_LOCK(sc);
	free(bif, M_DEVBUF);
}

/*
 * bridge_delete_span:
 *
 *	Delete the specified span interface.
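 *
 *	Span interfaces are not bridge members: bridge_span() hands a copy
 *	of every frame the bridge receives or transmits to each interface
 *	on sc_spanlist, which is useful for monitoring or IDS taps.  They
 *	are added and removed through bridge_ioctl_addspan() and
 *	bridge_ioctl_delspan() below; this routine only unlinks the entry
 *	and frees it.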
958 */ 959 static void 960 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif) 961 { 962 BRIDGE_LOCK_ASSERT(sc); 963 964 KASSERT(bif->bif_ifp->if_bridge == NULL, 965 ("%s: not a span interface", __func__)); 966 967 LIST_REMOVE(bif, bif_next); 968 free(bif, M_DEVBUF); 969 } 970 971 static int 972 bridge_ioctl_add(struct bridge_softc *sc, void *arg) 973 { 974 struct ifbreq *req = arg; 975 struct bridge_iflist *bif = NULL; 976 struct ifnet *ifs; 977 int error = 0; 978 979 ifs = ifunit(req->ifbr_ifsname); 980 if (ifs == NULL) 981 return (ENOENT); 982 if (ifs->if_ioctl == NULL) /* must be supported */ 983 return (EINVAL); 984 985 /* If it's in the span list, it can't be a member. */ 986 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 987 if (ifs == bif->bif_ifp) 988 return (EBUSY); 989 990 /* Allow the first Ethernet member to define the MTU */ 991 if (ifs->if_type != IFT_GIF) { 992 if (LIST_EMPTY(&sc->sc_iflist)) 993 sc->sc_ifp->if_mtu = ifs->if_mtu; 994 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) { 995 if_printf(sc->sc_ifp, "invalid MTU for %s\n", 996 ifs->if_xname); 997 return (EINVAL); 998 } 999 } 1000 1001 if (ifs->if_bridge == sc) 1002 return (EEXIST); 1003 1004 if (ifs->if_bridge != NULL) 1005 return (EBUSY); 1006 1007 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1008 if (bif == NULL) 1009 return (ENOMEM); 1010 1011 bif->bif_ifp = ifs; 1012 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER; 1013 bif->bif_savedcaps = ifs->if_capenable; 1014 1015 switch (ifs->if_type) { 1016 case IFT_ETHER: 1017 case IFT_L2VLAN: 1018 case IFT_GIF: 1019 /* permitted interface types */ 1020 break; 1021 default: 1022 error = EINVAL; 1023 goto out; 1024 } 1025 1026 /* 1027 * Assign the interface's MAC address to the bridge if it's the first 1028 * member and the MAC address of the bridge has not been changed from 1029 * the default randomly generated one. 1030 */ 1031 if (bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) && 1032 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) { 1033 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 1034 sc->sc_ifaddr = ifs; 1035 } 1036 1037 ifs->if_bridge = sc; 1038 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp); 1039 /* 1040 * XXX: XLOCK HERE!?! 1041 * 1042 * NOTE: insert_***HEAD*** should be safe for the traversals. 1043 */ 1044 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next); 1045 1046 /* Set interface capabilities to the intersection set of all members */ 1047 bridge_mutecaps(sc); 1048 1049 switch (ifs->if_type) { 1050 case IFT_ETHER: 1051 case IFT_L2VLAN: 1052 /* 1053 * Place the interface into promiscuous mode. 
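		 *
		 * The member has to accept frames addressed to any station
		 * behind the other ports, not just its own MAC address, so
		 * bridging only works with the interface in promiscuous
		 * mode.  ifpromisc() keeps a per-interface reference count,
		 * and bridge_delete_member() drops the reference again when
		 * the member leaves the bridge.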
1054 */ 1055 BRIDGE_UNLOCK(sc); 1056 error = ifpromisc(ifs, 1); 1057 BRIDGE_LOCK(sc); 1058 break; 1059 } 1060 if (error) 1061 bridge_delete_member(sc, bif, 0); 1062 out: 1063 if (error) { 1064 if (bif != NULL) 1065 free(bif, M_DEVBUF); 1066 } 1067 return (error); 1068 } 1069 1070 static int 1071 bridge_ioctl_del(struct bridge_softc *sc, void *arg) 1072 { 1073 struct ifbreq *req = arg; 1074 struct bridge_iflist *bif; 1075 1076 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1077 if (bif == NULL) 1078 return (ENOENT); 1079 1080 bridge_delete_member(sc, bif, 0); 1081 1082 return (0); 1083 } 1084 1085 static int 1086 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) 1087 { 1088 struct ifbreq *req = arg; 1089 struct bridge_iflist *bif; 1090 struct bstp_port *bp; 1091 1092 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1093 if (bif == NULL) 1094 return (ENOENT); 1095 1096 bp = &bif->bif_stp; 1097 req->ifbr_ifsflags = bif->bif_flags; 1098 req->ifbr_state = bp->bp_state; 1099 req->ifbr_priority = bp->bp_priority; 1100 req->ifbr_path_cost = bp->bp_path_cost; 1101 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1102 req->ifbr_proto = bp->bp_protover; 1103 req->ifbr_role = bp->bp_role; 1104 req->ifbr_stpflags = bp->bp_flags; 1105 req->ifbr_addrcnt = bif->bif_addrcnt; 1106 req->ifbr_addrmax = bif->bif_addrmax; 1107 req->ifbr_addrexceeded = bif->bif_addrexceeded; 1108 1109 /* Copy STP state options as flags */ 1110 if (bp->bp_operedge) 1111 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE; 1112 if (bp->bp_flags & BSTP_PORT_AUTOEDGE) 1113 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE; 1114 if (bp->bp_ptp_link) 1115 req->ifbr_ifsflags |= IFBIF_BSTP_PTP; 1116 if (bp->bp_flags & BSTP_PORT_AUTOPTP) 1117 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP; 1118 if (bp->bp_flags & BSTP_PORT_ADMEDGE) 1119 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE; 1120 if (bp->bp_flags & BSTP_PORT_ADMCOST) 1121 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST; 1122 return (0); 1123 } 1124 1125 static int 1126 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg) 1127 { 1128 struct ifbreq *req = arg; 1129 struct bridge_iflist *bif; 1130 struct bstp_port *bp; 1131 int error; 1132 1133 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1134 if (bif == NULL) 1135 return (ENOENT); 1136 bp = &bif->bif_stp; 1137 1138 if (req->ifbr_ifsflags & IFBIF_SPAN) 1139 /* SPAN is readonly */ 1140 return (EINVAL); 1141 1142 if (req->ifbr_ifsflags & IFBIF_STP) { 1143 if ((bif->bif_flags & IFBIF_STP) == 0) { 1144 error = bstp_enable(&bif->bif_stp); 1145 if (error) 1146 return (error); 1147 } 1148 } else { 1149 if ((bif->bif_flags & IFBIF_STP) != 0) 1150 bstp_disable(&bif->bif_stp); 1151 } 1152 1153 /* Pass on STP flags */ 1154 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0); 1155 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0); 1156 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0); 1157 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 
1 : 0); 1158 1159 /* Save the bits relating to the bridge */ 1160 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK; 1161 1162 return (0); 1163 } 1164 1165 static int 1166 bridge_ioctl_scache(struct bridge_softc *sc, void *arg) 1167 { 1168 struct ifbrparam *param = arg; 1169 1170 sc->sc_brtmax = param->ifbrp_csize; 1171 bridge_rttrim(sc); 1172 1173 return (0); 1174 } 1175 1176 static int 1177 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg) 1178 { 1179 struct ifbrparam *param = arg; 1180 1181 param->ifbrp_csize = sc->sc_brtmax; 1182 1183 return (0); 1184 } 1185 1186 static int 1187 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg) 1188 { 1189 struct ifbifconf *bifc = arg; 1190 struct bridge_iflist *bif; 1191 struct ifbreq breq; 1192 char *buf, *outbuf; 1193 int count, buflen, len, error = 0; 1194 1195 count = 0; 1196 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) 1197 count++; 1198 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1199 count++; 1200 1201 buflen = sizeof(breq) * count; 1202 if (bifc->ifbic_len == 0) { 1203 bifc->ifbic_len = buflen; 1204 return (0); 1205 } 1206 BRIDGE_UNLOCK(sc); 1207 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1208 BRIDGE_LOCK(sc); 1209 1210 count = 0; 1211 buf = outbuf; 1212 len = min(bifc->ifbic_len, buflen); 1213 bzero(&breq, sizeof(breq)); 1214 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1215 if (len < sizeof(breq)) 1216 break; 1217 1218 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1219 sizeof(breq.ifbr_ifsname)); 1220 /* Fill in the ifbreq structure */ 1221 error = bridge_ioctl_gifflags(sc, &breq); 1222 if (error) 1223 break; 1224 memcpy(buf, &breq, sizeof(breq)); 1225 count++; 1226 buf += sizeof(breq); 1227 len -= sizeof(breq); 1228 } 1229 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 1230 if (len < sizeof(breq)) 1231 break; 1232 1233 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1234 sizeof(breq.ifbr_ifsname)); 1235 breq.ifbr_ifsflags = bif->bif_flags; 1236 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1237 memcpy(buf, &breq, sizeof(breq)); 1238 count++; 1239 buf += sizeof(breq); 1240 len -= sizeof(breq); 1241 } 1242 1243 BRIDGE_UNLOCK(sc); 1244 bifc->ifbic_len = sizeof(breq) * count; 1245 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); 1246 BRIDGE_LOCK(sc); 1247 free(outbuf, M_TEMP); 1248 return (error); 1249 } 1250 1251 static int 1252 bridge_ioctl_rts(struct bridge_softc *sc, void *arg) 1253 { 1254 struct ifbaconf *bac = arg; 1255 struct bridge_rtnode *brt; 1256 struct ifbareq bareq; 1257 char *buf, *outbuf; 1258 int count, buflen, len, error = 0; 1259 1260 if (bac->ifbac_len == 0) 1261 return (0); 1262 1263 count = 0; 1264 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) 1265 count++; 1266 buflen = sizeof(bareq) * count; 1267 1268 BRIDGE_UNLOCK(sc); 1269 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1270 BRIDGE_LOCK(sc); 1271 1272 count = 0; 1273 buf = outbuf; 1274 len = min(bac->ifbac_len, buflen); 1275 bzero(&bareq, sizeof(bareq)); 1276 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 1277 if (len < sizeof(bareq)) 1278 goto out; 1279 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname, 1280 sizeof(bareq.ifba_ifsname)); 1281 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); 1282 bareq.ifba_vlan = brt->brt_vlan; 1283 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 1284 time_uptime < brt->brt_expire) 1285 bareq.ifba_expire = brt->brt_expire - time_uptime; 1286 else 1287 bareq.ifba_expire = 0; 1288 bareq.ifba_flags = brt->brt_flags; 1289 1290 memcpy(buf, &bareq, sizeof(bareq)); 1291 
count++; 1292 buf += sizeof(bareq); 1293 len -= sizeof(bareq); 1294 } 1295 out: 1296 BRIDGE_UNLOCK(sc); 1297 bac->ifbac_len = sizeof(bareq) * count; 1298 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); 1299 BRIDGE_LOCK(sc); 1300 free(outbuf, M_TEMP); 1301 return (error); 1302 } 1303 1304 static int 1305 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg) 1306 { 1307 struct ifbareq *req = arg; 1308 struct bridge_iflist *bif; 1309 int error; 1310 1311 bif = bridge_lookup_member(sc, req->ifba_ifsname); 1312 if (bif == NULL) 1313 return (ENOENT); 1314 1315 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1, 1316 req->ifba_flags); 1317 1318 return (error); 1319 } 1320 1321 static int 1322 bridge_ioctl_sto(struct bridge_softc *sc, void *arg) 1323 { 1324 struct ifbrparam *param = arg; 1325 1326 sc->sc_brttimeout = param->ifbrp_ctime; 1327 return (0); 1328 } 1329 1330 static int 1331 bridge_ioctl_gto(struct bridge_softc *sc, void *arg) 1332 { 1333 struct ifbrparam *param = arg; 1334 1335 param->ifbrp_ctime = sc->sc_brttimeout; 1336 return (0); 1337 } 1338 1339 static int 1340 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg) 1341 { 1342 struct ifbareq *req = arg; 1343 1344 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan)); 1345 } 1346 1347 static int 1348 bridge_ioctl_flush(struct bridge_softc *sc, void *arg) 1349 { 1350 struct ifbreq *req = arg; 1351 1352 bridge_rtflush(sc, req->ifbr_ifsflags); 1353 return (0); 1354 } 1355 1356 static int 1357 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg) 1358 { 1359 struct ifbrparam *param = arg; 1360 struct bstp_state *bs = &sc->sc_stp; 1361 1362 param->ifbrp_prio = bs->bs_bridge_priority; 1363 return (0); 1364 } 1365 1366 static int 1367 bridge_ioctl_spri(struct bridge_softc *sc, void *arg) 1368 { 1369 struct ifbrparam *param = arg; 1370 1371 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio)); 1372 } 1373 1374 static int 1375 bridge_ioctl_ght(struct bridge_softc *sc, void *arg) 1376 { 1377 struct ifbrparam *param = arg; 1378 struct bstp_state *bs = &sc->sc_stp; 1379 1380 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8; 1381 return (0); 1382 } 1383 1384 static int 1385 bridge_ioctl_sht(struct bridge_softc *sc, void *arg) 1386 { 1387 struct ifbrparam *param = arg; 1388 1389 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime)); 1390 } 1391 1392 static int 1393 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg) 1394 { 1395 struct ifbrparam *param = arg; 1396 struct bstp_state *bs = &sc->sc_stp; 1397 1398 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8; 1399 return (0); 1400 } 1401 1402 static int 1403 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg) 1404 { 1405 struct ifbrparam *param = arg; 1406 1407 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay)); 1408 } 1409 1410 static int 1411 bridge_ioctl_gma(struct bridge_softc *sc, void *arg) 1412 { 1413 struct ifbrparam *param = arg; 1414 struct bstp_state *bs = &sc->sc_stp; 1415 1416 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8; 1417 return (0); 1418 } 1419 1420 static int 1421 bridge_ioctl_sma(struct bridge_softc *sc, void *arg) 1422 { 1423 struct ifbrparam *param = arg; 1424 1425 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage)); 1426 } 1427 1428 static int 1429 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg) 1430 { 1431 struct ifbreq *req = arg; 1432 struct bridge_iflist *bif; 1433 1434 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1435 if (bif == NULL) 1436 return (ENOENT); 1437 1438 return 
(bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority)); 1439 } 1440 1441 static int 1442 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg) 1443 { 1444 struct ifbreq *req = arg; 1445 struct bridge_iflist *bif; 1446 1447 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1448 if (bif == NULL) 1449 return (ENOENT); 1450 1451 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost)); 1452 } 1453 1454 static int 1455 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg) 1456 { 1457 struct ifbreq *req = arg; 1458 struct bridge_iflist *bif; 1459 1460 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1461 if (bif == NULL) 1462 return (ENOENT); 1463 1464 bif->bif_addrmax = req->ifbr_addrmax; 1465 return (0); 1466 } 1467 1468 static int 1469 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg) 1470 { 1471 struct ifbreq *req = arg; 1472 struct bridge_iflist *bif = NULL; 1473 struct ifnet *ifs; 1474 1475 ifs = ifunit(req->ifbr_ifsname); 1476 if (ifs == NULL) 1477 return (ENOENT); 1478 1479 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1480 if (ifs == bif->bif_ifp) 1481 return (EBUSY); 1482 1483 if (ifs->if_bridge != NULL) 1484 return (EBUSY); 1485 1486 switch (ifs->if_type) { 1487 case IFT_ETHER: 1488 case IFT_GIF: 1489 case IFT_L2VLAN: 1490 break; 1491 default: 1492 return (EINVAL); 1493 } 1494 1495 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1496 if (bif == NULL) 1497 return (ENOMEM); 1498 1499 bif->bif_ifp = ifs; 1500 bif->bif_flags = IFBIF_SPAN; 1501 1502 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next); 1503 1504 return (0); 1505 } 1506 1507 static int 1508 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg) 1509 { 1510 struct ifbreq *req = arg; 1511 struct bridge_iflist *bif; 1512 struct ifnet *ifs; 1513 1514 ifs = ifunit(req->ifbr_ifsname); 1515 if (ifs == NULL) 1516 return (ENOENT); 1517 1518 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1519 if (ifs == bif->bif_ifp) 1520 break; 1521 1522 if (bif == NULL) 1523 return (ENOENT); 1524 1525 bridge_delete_span(sc, bif); 1526 1527 return (0); 1528 } 1529 1530 static int 1531 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg) 1532 { 1533 struct ifbropreq *req = arg; 1534 struct bstp_state *bs = &sc->sc_stp; 1535 struct bstp_port *root_port; 1536 1537 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; 1538 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; 1539 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; 1540 1541 root_port = bs->bs_root_port; 1542 if (root_port == NULL) 1543 req->ifbop_root_port = 0; 1544 else 1545 req->ifbop_root_port = root_port->bp_ifp->if_index; 1546 1547 req->ifbop_holdcount = bs->bs_txholdcount; 1548 req->ifbop_priority = bs->bs_bridge_priority; 1549 req->ifbop_protocol = bs->bs_protover; 1550 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; 1551 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; 1552 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; 1553 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; 1554 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; 1555 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; 1556 1557 return (0); 1558 } 1559 1560 static int 1561 bridge_ioctl_grte(struct bridge_softc *sc, void *arg) 1562 { 1563 struct ifbrparam *param = arg; 1564 1565 param->ifbrp_cexceeded = sc->sc_brtexceeded; 1566 return (0); 1567 } 1568 1569 static int 1570 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg) 1571 { 1572 struct ifbpstpconf *bifstp = arg; 1573 struct bridge_iflist *bif; 1574 struct bstp_port *bp; 1575 
struct ifbpstpreq bpreq; 1576 char *buf, *outbuf; 1577 int count, buflen, len, error = 0; 1578 1579 count = 0; 1580 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1581 if ((bif->bif_flags & IFBIF_STP) != 0) 1582 count++; 1583 } 1584 1585 buflen = sizeof(bpreq) * count; 1586 if (bifstp->ifbpstp_len == 0) { 1587 bifstp->ifbpstp_len = buflen; 1588 return (0); 1589 } 1590 1591 BRIDGE_UNLOCK(sc); 1592 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1593 BRIDGE_LOCK(sc); 1594 1595 count = 0; 1596 buf = outbuf; 1597 len = min(bifstp->ifbpstp_len, buflen); 1598 bzero(&bpreq, sizeof(bpreq)); 1599 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1600 if (len < sizeof(bpreq)) 1601 break; 1602 1603 if ((bif->bif_flags & IFBIF_STP) == 0) 1604 continue; 1605 1606 bp = &bif->bif_stp; 1607 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; 1608 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; 1609 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; 1610 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; 1611 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; 1612 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; 1613 1614 memcpy(buf, &bpreq, sizeof(bpreq)); 1615 count++; 1616 buf += sizeof(bpreq); 1617 len -= sizeof(bpreq); 1618 } 1619 1620 BRIDGE_UNLOCK(sc); 1621 bifstp->ifbpstp_len = sizeof(bpreq) * count; 1622 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); 1623 BRIDGE_LOCK(sc); 1624 free(outbuf, M_TEMP); 1625 return (error); 1626 } 1627 1628 static int 1629 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg) 1630 { 1631 struct ifbrparam *param = arg; 1632 1633 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto)); 1634 } 1635 1636 static int 1637 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg) 1638 { 1639 struct ifbrparam *param = arg; 1640 1641 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc)); 1642 } 1643 1644 /* 1645 * bridge_ifdetach: 1646 * 1647 * Detach an interface from a bridge. Called when a member 1648 * interface is detaching. 1649 */ 1650 static void 1651 bridge_ifdetach(void *arg __unused, struct ifnet *ifp) 1652 { 1653 struct bridge_softc *sc = ifp->if_bridge; 1654 struct bridge_iflist *bif; 1655 1656 /* Check if the interface is a bridge member */ 1657 if (sc != NULL) { 1658 BRIDGE_LOCK(sc); 1659 1660 bif = bridge_lookup_member_if(sc, ifp); 1661 if (bif != NULL) 1662 bridge_delete_member(sc, bif, 1); 1663 1664 BRIDGE_UNLOCK(sc); 1665 return; 1666 } 1667 1668 /* Check if the interface is a span port */ 1669 mtx_lock(&bridge_list_mtx); 1670 LIST_FOREACH(sc, &bridge_list, sc_list) { 1671 BRIDGE_LOCK(sc); 1672 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1673 if (ifp == bif->bif_ifp) { 1674 bridge_delete_span(sc, bif); 1675 break; 1676 } 1677 1678 BRIDGE_UNLOCK(sc); 1679 } 1680 mtx_unlock(&bridge_list_mtx); 1681 } 1682 1683 /* 1684 * bridge_init: 1685 * 1686 * Initialize a bridge interface. 1687 */ 1688 static void 1689 bridge_init(void *xsc) 1690 { 1691 struct bridge_softc *sc = (struct bridge_softc *)xsc; 1692 struct ifnet *ifp = sc->sc_ifp; 1693 1694 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1695 return; 1696 1697 BRIDGE_LOCK(sc); 1698 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz, 1699 bridge_timer, sc); 1700 1701 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1702 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */ 1703 1704 BRIDGE_UNLOCK(sc); 1705 } 1706 1707 /* 1708 * bridge_stop: 1709 * 1710 * Stop the bridge interface. 
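 *
 *	Called when the bridge is administratively downed (see the
 *	SIOCSIFFLAGS case in bridge_ioctl() above) and from
 *	bridge_clone_destroy().  The ageing callout and the spanning tree
 *	are stopped and dynamically learned addresses are flushed
 *	(IFBF_FLUSHDYN); statically configured entries are kept.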
1711 */ 1712 static void 1713 bridge_stop(struct ifnet *ifp, int disable) 1714 { 1715 struct bridge_softc *sc = ifp->if_softc; 1716 1717 BRIDGE_LOCK_ASSERT(sc); 1718 1719 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1720 return; 1721 1722 callout_stop(&sc->sc_brcallout); 1723 bstp_stop(&sc->sc_stp); 1724 1725 bridge_rtflush(sc, IFBF_FLUSHDYN); 1726 1727 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1728 } 1729 1730 /* 1731 * bridge_enqueue: 1732 * 1733 * Enqueue a packet on a bridge member interface. 1734 * 1735 */ 1736 static void 1737 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m) 1738 { 1739 int len, err = 0; 1740 short mflags; 1741 struct mbuf *m0; 1742 1743 len = m->m_pkthdr.len; 1744 mflags = m->m_flags; 1745 1746 /* We may be sending a fragment so traverse the mbuf */ 1747 for (; m; m = m0) { 1748 m0 = m->m_nextpkt; 1749 m->m_nextpkt = NULL; 1750 1751 /* 1752 * If underlying interface can not do VLAN tag insertion itself 1753 * then attach a packet tag that holds it. 1754 */ 1755 if ((m->m_flags & M_VLANTAG) && 1756 (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) { 1757 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1758 if (m == NULL) { 1759 if_printf(dst_ifp, 1760 "unable to prepend VLAN header\n"); 1761 dst_ifp->if_oerrors++; 1762 continue; 1763 } 1764 m->m_flags &= ~M_VLANTAG; 1765 } 1766 1767 if (err == 0) 1768 dst_ifp->if_transmit(dst_ifp, m); 1769 } 1770 1771 if (err == 0) { 1772 sc->sc_ifp->if_opackets++; 1773 sc->sc_ifp->if_obytes += len; 1774 if (mflags & M_MCAST) 1775 sc->sc_ifp->if_omcasts++; 1776 } 1777 } 1778 1779 /* 1780 * bridge_dummynet: 1781 * 1782 * Receive a queued packet from dummynet and pass it on to the output 1783 * interface. 1784 * 1785 * The mbuf has the Ethernet header already attached. 1786 */ 1787 static void 1788 bridge_dummynet(struct mbuf *m, struct ifnet *ifp) 1789 { 1790 struct bridge_softc *sc; 1791 1792 sc = ifp->if_bridge; 1793 1794 /* 1795 * The packet didnt originate from a member interface. This should only 1796 * ever happen if a member interface is removed while packets are 1797 * queued for it. 1798 */ 1799 if (sc == NULL) { 1800 m_freem(m); 1801 return; 1802 } 1803 1804 if (PFIL_HOOKED(&inet_pfil_hook) 1805 #ifdef INET6 1806 || PFIL_HOOKED(&inet6_pfil_hook) 1807 #endif 1808 ) { 1809 if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0) 1810 return; 1811 if (m == NULL) 1812 return; 1813 } 1814 1815 bridge_enqueue(sc, ifp, m); 1816 } 1817 1818 /* 1819 * bridge_output: 1820 * 1821 * Send output from a bridge member interface. This 1822 * performs the bridging function for locally originated 1823 * packets. 1824 * 1825 * The mbuf has the Ethernet header already attached. We must 1826 * enqueue or free the mbuf before returning. 1827 */ 1828 static int 1829 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, 1830 struct rtentry *rt) 1831 { 1832 struct ether_header *eh; 1833 struct ifnet *dst_if; 1834 struct bridge_softc *sc; 1835 uint16_t vlan; 1836 1837 if (m->m_len < ETHER_HDR_LEN) { 1838 m = m_pullup(m, ETHER_HDR_LEN); 1839 if (m == NULL) 1840 return (0); 1841 } 1842 1843 eh = mtod(m, struct ether_header *); 1844 sc = ifp->if_bridge; 1845 vlan = VLANTAGOF(m); 1846 1847 BRIDGE_LOCK(sc); 1848 1849 /* 1850 * If bridge is down, but the original output interface is up, 1851 * go ahead and send out that interface. Otherwise, the packet 1852 * is dropped below. 
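	 *
	 * When no cached destination is found below, the frame is flooded:
	 * it is copied with m_copypacket() to each running member, skipping
	 * gif members and ports that spanning tree has put in the discarding
	 * state; the original mbuf is used for the last member to avoid one
	 * extra copy.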
1853 */ 1854 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1855 dst_if = ifp; 1856 goto sendunicast; 1857 } 1858 1859 /* 1860 * If the packet is a multicast, or we don't know a better way to 1861 * get there, send to all interfaces. 1862 */ 1863 if (ETHER_IS_MULTICAST(eh->ether_dhost)) 1864 dst_if = NULL; 1865 else 1866 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan); 1867 if (dst_if == NULL) { 1868 struct bridge_iflist *bif; 1869 struct mbuf *mc; 1870 int error = 0, used = 0; 1871 1872 bridge_span(sc, m); 1873 1874 BRIDGE_LOCK2REF(sc, error); 1875 if (error) { 1876 m_freem(m); 1877 return (0); 1878 } 1879 1880 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1881 dst_if = bif->bif_ifp; 1882 1883 if (dst_if->if_type == IFT_GIF) 1884 continue; 1885 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 1886 continue; 1887 1888 /* 1889 * If this is not the original output interface, 1890 * and the interface is participating in spanning 1891 * tree, make sure the port is in a state that 1892 * allows forwarding. 1893 */ 1894 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) && 1895 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 1896 continue; 1897 1898 if (LIST_NEXT(bif, bif_next) == NULL) { 1899 used = 1; 1900 mc = m; 1901 } else { 1902 mc = m_copypacket(m, M_DONTWAIT); 1903 if (mc == NULL) { 1904 sc->sc_ifp->if_oerrors++; 1905 continue; 1906 } 1907 } 1908 1909 bridge_enqueue(sc, dst_if, mc); 1910 } 1911 if (used == 0) 1912 m_freem(m); 1913 BRIDGE_UNREF(sc); 1914 return (0); 1915 } 1916 1917 sendunicast: 1918 /* 1919 * XXX Spanning tree consideration here? 1920 */ 1921 1922 bridge_span(sc, m); 1923 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1924 m_freem(m); 1925 BRIDGE_UNLOCK(sc); 1926 return (0); 1927 } 1928 1929 BRIDGE_UNLOCK(sc); 1930 bridge_enqueue(sc, dst_if, m); 1931 return (0); 1932 } 1933 1934 /* 1935 * bridge_start: 1936 * 1937 * Start output on a bridge. 1938 * 1939 */ 1940 static void 1941 bridge_start(struct ifnet *ifp) 1942 { 1943 struct bridge_softc *sc; 1944 struct mbuf *m; 1945 struct ether_header *eh; 1946 struct ifnet *dst_if; 1947 1948 sc = ifp->if_softc; 1949 1950 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1951 for (;;) { 1952 IFQ_DEQUEUE(&ifp->if_snd, m); 1953 if (m == 0) 1954 break; 1955 ETHER_BPF_MTAP(ifp, m); 1956 1957 eh = mtod(m, struct ether_header *); 1958 dst_if = NULL; 1959 1960 BRIDGE_LOCK(sc); 1961 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 1962 dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1); 1963 } 1964 1965 if (dst_if == NULL) 1966 bridge_broadcast(sc, ifp, m, 0); 1967 else { 1968 BRIDGE_UNLOCK(sc); 1969 bridge_enqueue(sc, dst_if, m); 1970 } 1971 } 1972 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1973 } 1974 1975 /* 1976 * bridge_forward: 1977 * 1978 * The forwarding function of the bridge. 1979 * 1980 * NOTE: Releases the lock on return. 1981 */ 1982 static void 1983 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, 1984 struct mbuf *m) 1985 { 1986 struct bridge_iflist *dbif; 1987 struct ifnet *src_if, *dst_if, *ifp; 1988 struct ether_header *eh; 1989 uint16_t vlan; 1990 uint8_t *dst; 1991 int error; 1992 1993 src_if = m->m_pkthdr.rcvif; 1994 ifp = sc->sc_ifp; 1995 1996 ifp->if_ipackets++; 1997 ifp->if_ibytes += m->m_pkthdr.len; 1998 vlan = VLANTAGOF(m); 1999 2000 if ((sbif->bif_flags & IFBIF_STP) && 2001 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2002 goto drop; 2003 2004 eh = mtod(m, struct ether_header *); 2005 dst = eh->ether_dhost; 2006 2007 /* If the interface is learning, record the address. 
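	 * bridge_rtupdate() inserts or refreshes the (source MAC, VLAN)
	 * entry for the receiving member.  If the port has an address limit
	 * configured (bif_addrmax) and the update fails because the limit
	 * has been reached, the frame is dropped below, so unknown sources
	 * cannot crowd out existing entries on that port.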

/*
 * bridge_start:
 *
 *	Start output on a bridge.
 */
static void
bridge_start(struct ifnet *ifp)
{
	struct bridge_softc *sc;
	struct mbuf *m;
	struct ether_header *eh;
	struct ifnet *dst_if;

	sc = ifp->if_softc;

	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;
		ETHER_BPF_MTAP(ifp, m);

		eh = mtod(m, struct ether_header *);
		dst_if = NULL;

		BRIDGE_LOCK(sc);
		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
			dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1);
		}

		if (dst_if == NULL)
			bridge_broadcast(sc, ifp, m, 0);
		else {
			BRIDGE_UNLOCK(sc);
			bridge_enqueue(sc, dst_if, m);
		}
	}
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

/*
 * bridge_forward:
 *
 *	The forwarding function of the bridge.
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
    struct mbuf *m)
{
	struct bridge_iflist *dbif;
	struct ifnet *src_if, *dst_if, *ifp;
	struct ether_header *eh;
	uint16_t vlan;
	uint8_t *dst;
	int error;

	src_if = m->m_pkthdr.rcvif;
	ifp = sc->sc_ifp;

	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;
	vlan = VLANTAGOF(m);

	if ((sbif->bif_flags & IFBIF_STP) &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
		goto drop;

	eh = mtod(m, struct ether_header *);
	dst = eh->ether_dhost;

	/* If the interface is learning, record the address. */
	if (sbif->bif_flags & IFBIF_LEARNING) {
		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
		    sbif, 0, IFBAF_DYNAMIC);
		/*
		 * If the interface has address limits then deny any source
		 * that is not in the cache.
		 */
		if (error && sbif->bif_addrmax)
			goto drop;
	}

	if ((sbif->bif_flags & IFBIF_STP) != 0 &&
	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
		goto drop;

	/*
	 * At this point, the port either doesn't participate
	 * in spanning tree or it is in the forwarding state.
	 */

	/*
	 * If the packet is unicast, destined for someone on
	 * "this" side of the bridge, drop it.
	 */
	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
		dst_if = bridge_rtlookup(sc, dst, vlan);
		if (src_if == dst_if)
			goto drop;
	} else {
		/*
		 * Check if it's a reserved multicast address; any address
		 * listed in 802.1D section 7.12.6 may not be forwarded by
		 * the bridge.
		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F.
		 */
		if (dst[0] == 0x01 && dst[1] == 0x80 &&
		    dst[2] == 0xc2 && dst[3] == 0x00 &&
		    dst[4] == 0x00 && dst[5] <= 0x0f)
			goto drop;

		/* ...forward it to all interfaces. */
		ifp->if_imcasts++;
		dst_if = NULL;
	}

	/*
	 * If we have a destination interface which is a member of our bridge,
	 * OR this is a unicast packet, push it through the bpf(4) machinery.
	 * For broadcast or multicast packets, don't bother because it will
	 * be reinjected into ether_input.  We do this before we pass the
	 * packets through the pfil(9) framework, as it is possible that
	 * pfil(9) will drop the packet, or possibly modify it, making it
	 * difficult to debug firewall issues on the bridge.
	 */
	if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
		ETHER_BPF_MTAP(ifp, m);

	/* run the packet filter */
	if (PFIL_HOOKED(&inet_pfil_hook)
#ifdef INET6
	    || PFIL_HOOKED(&inet6_pfil_hook)
#endif
	    ) {
		BRIDGE_UNLOCK(sc);
		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
			return;
		if (m == NULL)
			return;
		BRIDGE_LOCK(sc);
	}

	if (dst_if == NULL) {
		bridge_broadcast(sc, src_if, m, 1);
		return;
	}

	/*
	 * At this point, we're dealing with a unicast frame
	 * going to a different interface.
	 */
	if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto drop;

	dbif = bridge_lookup_member_if(sc, dst_if);
	if (dbif == NULL)
		/* Not a member of the bridge (anymore?) */
		goto drop;

	/* Private segments cannot talk to each other */
	if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
		goto drop;

	if ((dbif->bif_flags & IFBIF_STP) &&
	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
		goto drop;

	BRIDGE_UNLOCK(sc);

	if (PFIL_HOOKED(&inet_pfil_hook)
#ifdef INET6
	    || PFIL_HOOKED(&inet6_pfil_hook)
#endif
	    ) {
		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	bridge_enqueue(sc, dst_if, m);
	return;

drop:
	BRIDGE_UNLOCK(sc);
	m_freem(m);
}
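
/*
 * Illustrative note: the 01-80-C2-00-00-00/0F range filtered above covers
 * the link-local group addresses that 802.1D reserves for protocols which
 * must not cross a bridge, for example:
 *
 *	01:80:C2:00:00:00	Spanning Tree (BPDUs)
 *	01:80:C2:00:00:01	802.3x PAUSE frames
 *	01:80:C2:00:00:02	Slow protocols (LACP, OAM)
 *	01:80:C2:00:00:03	802.1X port authentication
 *
 * A sketch of the same test written against a prefix, assuming a local
 * helper name that is not part of this file:
 */
#if 0
static int
bridge_reserved_group(const uint8_t *dst)
{
	static const uint8_t prefix[5] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

	return (memcmp(dst, prefix, sizeof(prefix)) == 0 && dst[5] <= 0x0f);
}
#endif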

/*
 * bridge_input:
 *
 *	Receive input from a member interface.  Queue the packet for
 *	bridging if it is not for us.
 */
static struct mbuf *
bridge_input(struct ifnet *ifp, struct mbuf *m)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *bif, *bif2;
	struct ifnet *bifp;
	struct ether_header *eh;
	struct mbuf *mc, *mc2;
	uint16_t vlan;
	int error;

	if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return (m);

	bifp = sc->sc_ifp;
	vlan = VLANTAGOF(m);

	/*
	 * Implement support for bridge monitoring.  If this flag has been
	 * set on this interface, discard the packet once we push it through
	 * the bpf(4) machinery, but before we do, increment the byte and
	 * packet counters associated with this interface.
	 */
	if ((bifp->if_flags & IFF_MONITOR) != 0) {
		m->m_pkthdr.rcvif = bifp;
		ETHER_BPF_MTAP(bifp, m);
		bifp->if_ipackets++;
		bifp->if_ibytes += m->m_pkthdr.len;
		m_freem(m);
		return (NULL);
	}
	BRIDGE_LOCK(sc);
	bif = bridge_lookup_member_if(sc, ifp);
	if (bif == NULL) {
		BRIDGE_UNLOCK(sc);
		return (m);
	}

	eh = mtod(m, struct ether_header *);

	bridge_span(sc, m);

	if (m->m_flags & (M_BCAST|M_MCAST)) {
		/* Tap off 802.1D packets; they do not get forwarded. */
		if (memcmp(eh->ether_dhost, bstp_etheraddr,
		    ETHER_ADDR_LEN) == 0) {
			m = bstp_input(&bif->bif_stp, ifp, m);
			if (m == NULL) {
				BRIDGE_UNLOCK(sc);
				return (NULL);
			}
		}

		if ((bif->bif_flags & IFBIF_STP) &&
		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
			BRIDGE_UNLOCK(sc);
			return (m);
		}

		/*
		 * Make a deep copy of the packet and enqueue the copy
		 * for bridge processing; return the original packet for
		 * local processing.
		 */
		mc = m_dup(m, M_DONTWAIT);
		if (mc == NULL) {
			BRIDGE_UNLOCK(sc);
			return (m);
		}

		/* Perform the bridge forwarding function with the copy. */
		bridge_forward(sc, bif, mc);

		/*
		 * Reinject the mbuf as arriving on the bridge so we have a
		 * chance at claiming multicast packets.  We cannot loop back
		 * here from ether_input as a bridge is never a member of a
		 * bridge.
		 */
		KASSERT(bifp->if_bridge == NULL,
		    ("loop created in bridge_input"));
		mc2 = m_dup(m, M_DONTWAIT);
		if (mc2 != NULL) {
			/* Keep the layer3 header aligned */
			int i = min(mc2->m_pkthdr.len, max_protohdr);
			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
		}
		if (mc2 != NULL) {
			mc2->m_pkthdr.rcvif = bifp;
			(*bifp->if_input)(bifp, mc2);
		}

		/* Return the original packet for local processing. */
		return (m);
	}

	if ((bif->bif_flags & IFBIF_STP) &&
	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
		BRIDGE_UNLOCK(sc);
		return (m);
	}

#if (defined(INET) || defined(INET6)) && defined(DEV_CARP)
# define OR_CARP_CHECK_WE_ARE_DST(iface) \
	|| ((iface)->if_carp \
	    && carp_forus((iface)->if_carp, eh->ether_dhost))
# define OR_CARP_CHECK_WE_ARE_SRC(iface) \
	|| ((iface)->if_carp \
	    && carp_forus((iface)->if_carp, eh->ether_shost))
#else
# define OR_CARP_CHECK_WE_ARE_DST(iface)
# define OR_CARP_CHECK_WE_ARE_SRC(iface)
#endif

#ifdef INET6
# define OR_PFIL_HOOKED_INET6 \
	|| PFIL_HOOKED(&inet6_pfil_hook)
#else
# define OR_PFIL_HOOKED_INET6
#endif

#define GRAB_OUR_PACKETS(iface) \
	if ((iface)->if_type == IFT_GIF) \
		continue; \
	/* It is destined for us. */ \
	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \
	    OR_CARP_CHECK_WE_ARE_DST((iface)) \
	    ) { \
		if ((iface)->if_type == IFT_BRIDGE) { \
			ETHER_BPF_MTAP(iface, m); \
			iface->if_ipackets++; \
			/* Filter on the physical interface. */ \
			if (pfil_local_phys && \
			    (PFIL_HOOKED(&inet_pfil_hook) \
			     OR_PFIL_HOOKED_INET6)) { \
				if (bridge_pfil(&m, NULL, ifp, \
				    PFIL_IN) != 0 || m == NULL) { \
					BRIDGE_UNLOCK(sc); \
					return (NULL); \
				} \
			} \
		} \
		if (bif->bif_flags & IFBIF_LEARNING) { \
			error = bridge_rtupdate(sc, eh->ether_shost, \
			    vlan, bif, 0, IFBAF_DYNAMIC); \
			if (error && bif->bif_addrmax) { \
				BRIDGE_UNLOCK(sc); \
				m_freem(m); \
				return (NULL); \
			} \
		} \
		m->m_pkthdr.rcvif = iface; \
		BRIDGE_UNLOCK(sc); \
		return (m); \
	} \
	\
	/* We just received a packet that we sent out. */ \
	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
	    OR_CARP_CHECK_WE_ARE_SRC((iface)) \
	    ) { \
		BRIDGE_UNLOCK(sc); \
		m_freem(m); \
		return (NULL); \
	}

	/*
	 * Unicast.  Make sure it's not for the bridge.
	 */
	do { GRAB_OUR_PACKETS(bifp) } while (0);

	/*
	 * Check the interface the packet arrived on (ifp) first.  This helps
	 * when the packet comes in through an interface, such as a VLAN,
	 * that shares a MAC address with other members of the same bridge,
	 * and it also saves some CPU cycles when the destination interface
	 * and the input interface are the same.
	 */
	do { GRAB_OUR_PACKETS(ifp) } while (0);

	/* Now check all the bridge members. */
	LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
		GRAB_OUR_PACKETS(bif2->bif_ifp)
	}

#undef OR_CARP_CHECK_WE_ARE_DST
#undef OR_CARP_CHECK_WE_ARE_SRC
#undef OR_PFIL_HOOKED_INET6
#undef GRAB_OUR_PACKETS

	/* Perform the bridge forwarding function. */
	bridge_forward(sc, bif, m);

	return (NULL);
}
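
/*
 * Illustrative sketch (not compiled): the contract bridge_input() offers
 * its caller in the Ethernet input path.  A NULL return means the bridge
 * consumed (forwarded, filtered or freed) the packet; a non-NULL return
 * hands the packet back for normal local processing, possibly with
 * m_pkthdr.rcvif rewritten to the member or bridge interface that owns
 * the destination address.  The caller shown here is schematic, not the
 * actual hook.
 */
#if 0
	if (ifp->if_bridge != NULL) {
		m = bridge_input(ifp, m);
		if (m == NULL)
			return;		/* bridged, filtered or freed */
		/* fall through: deliver locally on m->m_pkthdr.rcvif */
	}
#endif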

/*
 * bridge_broadcast:
 *
 *	Send a frame to all interfaces that are members of
 *	the bridge, except for the one on which the packet
 *	arrived.
 *
 *	NOTE: Releases the lock on return.
 */
static void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
    struct mbuf *m, int runfilt)
{
	struct bridge_iflist *dbif, *sbif;
	struct mbuf *mc;
	struct ifnet *dst_if;
	int error = 0, used = 0, i;

	sbif = bridge_lookup_member_if(sc, src_if);

	BRIDGE_LOCK2REF(sc, error);
	if (error) {
		m_freem(m);
		return;
	}

	/* Filter on the bridge interface before broadcasting */
	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
#ifdef INET6
	    || PFIL_HOOKED(&inet6_pfil_hook)
#endif
	    )) {
		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
			goto out;
		if (m == NULL)
			goto out;
	}

	LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
		dst_if = dbif->bif_ifp;
		if (dst_if == src_if)
			continue;

		/* Private segments cannot talk to each other */
		if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
			continue;

		if ((dbif->bif_flags & IFBIF_STP) &&
		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
			continue;

		if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
			continue;

		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
			continue;

		if (LIST_NEXT(dbif, bif_next) == NULL) {
			mc = m;
			used = 1;
		} else {
			mc = m_dup(m, M_DONTWAIT);
			if (mc == NULL) {
				sc->sc_ifp->if_oerrors++;
				continue;
			}
		}

		/*
		 * Filter on the output interface.  Pass a NULL bridge
		 * interface pointer so we do not redundantly filter on the
		 * bridge for each interface we broadcast on.
		 */
		if (runfilt && (PFIL_HOOKED(&inet_pfil_hook)
#ifdef INET6
		    || PFIL_HOOKED(&inet6_pfil_hook)
#endif
		    )) {
			if (used == 0) {
				/* Keep the layer3 header aligned */
				i = min(mc->m_pkthdr.len, max_protohdr);
				mc = m_copyup(mc, i, ETHER_ALIGN);
				if (mc == NULL) {
					sc->sc_ifp->if_oerrors++;
					continue;
				}
			}
			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
				continue;
			if (mc == NULL)
				continue;
		}

		bridge_enqueue(sc, dst_if, mc);
	}
	if (used == 0)
		m_freem(m);

out:
	BRIDGE_UNREF(sc);
}

/*
 * bridge_span:
 *
 *	Duplicate a packet out one or more interfaces that are in span mode;
 *	the original mbuf is unmodified.
 */
static void
bridge_span(struct bridge_softc *sc, struct mbuf *m)
{
	struct bridge_iflist *bif;
	struct ifnet *dst_if;
	struct mbuf *mc;

	if (LIST_EMPTY(&sc->sc_spanlist))
		return;

	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
		dst_if = bif->bif_ifp;

		if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
			continue;

		mc = m_copypacket(m, M_DONTWAIT);
		if (mc == NULL) {
			sc->sc_ifp->if_oerrors++;
			continue;
		}

		bridge_enqueue(sc, dst_if, mc);
	}
}
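
/*
 * Note on the two copy primitives used above: bridge_span() and the
 * flooding loop in bridge_output() use m_copypacket(), which creates a
 * read-only copy that shares the data clusters with the original, while
 * bridge_broadcast() and the reinjection path in bridge_input() use
 * m_dup(), which makes a writable deep copy.  The deep copy matters when
 * the copy may subsequently be modified, for example by pfil(9) hooks or
 * by the m_copyup() realignment above.  A minimal sketch of the
 * distinction:
 */
#if 0
	struct mbuf *shared, *writable;

	shared = m_copypacket(m, M_DONTWAIT);	/* clusters shared, do not write */
	writable = m_dup(m, M_DONTWAIT);	/* private copy, safe to modify */
#endif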

/*
 * bridge_rtupdate:
 *
 *	Add a bridge routing entry.
 */
static int
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
    struct bridge_iflist *bif, int setflags, uint8_t flags)
{
	struct bridge_rtnode *brt;
	int error;

	BRIDGE_LOCK_ASSERT(sc);

	/* Check that the source address is valid and not multicast. */
	if (ETHER_IS_MULTICAST(dst) ||
	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
	     dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
		return (EINVAL);

	/* 802.1p frames map to vlan 1 */
	if (vlan == 0)
		vlan = 1;

	/*
	 * A route for this destination might already exist.  If so,
	 * update it, otherwise create a new one.
	 */
	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
		if (sc->sc_brtcnt >= sc->sc_brtmax) {
			sc->sc_brtexceeded++;
			return (ENOSPC);
		}
		/* Check per interface address limits (if enabled) */
		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
			bif->bif_addrexceeded++;
			return (ENOSPC);
		}

		/*
		 * Allocate a new bridge forwarding node, and
		 * initialize the expiration time and Ethernet
		 * address.
		 */
		brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO);
		if (brt == NULL)
			return (ENOMEM);

		if (bif->bif_flags & IFBIF_STICKY)
			brt->brt_flags = IFBAF_STICKY;
		else
			brt->brt_flags = IFBAF_DYNAMIC;

		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
		brt->brt_vlan = vlan;

		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
			uma_zfree(bridge_rtnode_zone, brt);
			return (error);
		}
		brt->brt_dst = bif;
		bif->bif_addrcnt++;
	}

	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    brt->brt_dst != bif) {
		brt->brt_dst->bif_addrcnt--;
		brt->brt_dst = bif;
		brt->brt_dst->bif_addrcnt++;
	}

	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
		brt->brt_expire = time_uptime + sc->sc_brttimeout;
	if (setflags)
		brt->brt_flags = flags;

	return (0);
}

/*
 * bridge_rtlookup:
 *
 *	Lookup the destination interface for an address.
 */
static struct ifnet *
bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;

	BRIDGE_LOCK_ASSERT(sc);

	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
		return (NULL);

	return (brt->brt_ifp);
}

/*
 * bridge_rttrim:
 *
 *	Trim the routing table so that we have a number
 *	of routing entries less than or equal to the
 *	maximum number.
 */
static void
bridge_rttrim(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	/* Make sure we actually need to do this. */
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	/* Force an aging cycle; this might trim enough addresses. */
	bridge_rtage(sc);
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			bridge_rtnode_destroy(sc, brt);
			if (sc->sc_brtcnt <= sc->sc_brtmax)
				return;
		}
	}
}

/*
 * bridge_timer:
 *
 *	Aging timer for the bridge.
 */
static void
bridge_timer(void *arg)
{
	struct bridge_softc *sc = arg;

	BRIDGE_LOCK_ASSERT(sc);

	bridge_rtage(sc);

	if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
		callout_reset(&sc->sc_brcallout,
		    bridge_rtable_prune_period * hz, bridge_timer, sc);
}
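
/*
 * Illustrative sketch (not compiled): how dynamic entries age out.  Each
 * learned entry gets brt_expire = time_uptime + sc_brttimeout (default
 * BRIDGE_RTABLE_TIMEOUT, 20 minutes), and bridge_timer() reschedules
 * itself every bridge_rtable_prune_period seconds (default 5 minutes) to
 * run bridge_rtage(), which frees entries whose deadline has passed.  The
 * numbers below are just the defaults quoted for the example.
 */
#if 0
	/* On learning (bridge_rtupdate): */
	brt->brt_expire = time_uptime + sc->sc_brttimeout;	/* now + 1200s */

	/* On each aging pass (bridge_rtage): */
	if (time_uptime >= brt->brt_expire)
		bridge_rtnode_destroy(sc, brt);

	/* Rearm the aging callout (bridge_timer): */
	callout_reset(&sc->sc_brcallout,
	    bridge_rtable_prune_period * hz, bridge_timer, sc);
#endif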

/*
 * bridge_rtage:
 *
 *	Perform an aging cycle.
 */
static void
bridge_rtage(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			if (time_uptime >= brt->brt_expire)
				bridge_rtnode_destroy(sc, brt);
		}
	}
}

/*
 * bridge_rtflush:
 *
 *	Remove all dynamic addresses from the bridge.
 */
static void
bridge_rtflush(struct bridge_softc *sc, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
			bridge_rtnode_destroy(sc, brt);
	}
}

/*
 * bridge_rtdaddr:
 *
 *	Remove an address from the table.
 */
static int
bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;
	int found = 0;

	BRIDGE_LOCK_ASSERT(sc);

	/*
	 * If vlan is zero then we want to delete for all vlans so the lookup
	 * may return more than one.
	 */
	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
		bridge_rtnode_destroy(sc, brt);
		found = 1;
	}

	return (found ? 0 : ENOENT);
}

/*
 * bridge_rtdelete:
 *
 *	Delete routes to a specific member interface.
 */
static void
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (brt->brt_ifp == ifp && (full ||
		    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
			bridge_rtnode_destroy(sc, brt);
	}
}

/*
 * bridge_rtable_init:
 *
 *	Initialize the route table for this bridge.
 */
static int
bridge_rtable_init(struct bridge_softc *sc)
{
	int i;

	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_rthash == NULL)
		return (ENOMEM);

	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
		LIST_INIT(&sc->sc_rthash[i]);

	sc->sc_rthash_key = arc4random();

	LIST_INIT(&sc->sc_rtlist);

	return (0);
}

/*
 * bridge_rtable_fini:
 *
 *	Deconstruct the route table for this bridge.
 */
static void
bridge_rtable_fini(struct bridge_softc *sc)
{

	KASSERT(sc->sc_brtcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
	free(sc->sc_rthash, M_DEVBUF);
}
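
/*
 * Note: a vlan argument of zero acts as a wildcard throughout the address
 * table code.  bridge_rtnode_lookup() matches any vlan when asked for
 * vlan 0, which is why bridge_rtdaddr() loops until the lookup fails: one
 * ioctl-level delete can remove the same MAC address learned on several
 * vlans.  A schematic caller (the address is only an example):
 */
#if 0
	/* Remove 00:11:22:33:44:55 from every vlan on this bridge. */
	static const uint8_t mac[ETHER_ADDR_LEN] =
	    { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	BRIDGE_LOCK(sc);
	(void)bridge_rtdaddr(sc, mac, 0);	/* vlan 0 == all vlans */
	BRIDGE_UNLOCK(sc);
#endif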

/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
 */
#define	mix(a, b, c)						\
do {								\
	a -= b; a -= c; a ^= (c >> 13);				\
	b -= c; b -= a; b ^= (a << 8);				\
	c -= a; c -= b; c ^= (b >> 13);				\
	a -= b; a -= c; a ^= (c >> 12);				\
	b -= c; b -= a; b ^= (a << 16);				\
	c -= a; c -= b; c ^= (b >> 5);				\
	a -= b; a -= c; a ^= (c >> 3);				\
	b -= c; b -= a; b ^= (a << 10);				\
	c -= a; c -= b; c ^= (b >> 15);				\
} while (/*CONSTCOND*/0)

static __inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	return (c & BRIDGE_RTHASH_MASK);
}

#undef mix

static int
bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int i, d;

	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
		d = ((int)a[i]) - ((int)b[i]);
	}

	return (d);
}

/*
 * bridge_rtnode_lookup:
 *
 *	Look up a bridge route node for the specified destination.  Compare
 *	the vlan id; a vlan id of zero matches the first entry found for
 *	the address.
 */
static struct bridge_rtnode *
bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;
	uint32_t hash;
	int dir;

	BRIDGE_LOCK_ASSERT(sc);

	hash = bridge_rthash(sc, addr);
	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
			return (brt);
		if (dir > 0)
			return (NULL);
	}

	return (NULL);
}

/*
 * bridge_rtnode_insert:
 *
 *	Insert the specified bridge node into the route table.  We
 *	assume the entry is not already in the table.
 */
static int
bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	struct bridge_rtnode *lbrt;
	uint32_t hash;
	int dir;

	BRIDGE_LOCK_ASSERT(sc);

	hash = bridge_rthash(sc, brt->brt_addr);

	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
	if (lbrt == NULL) {
		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
		goto out;
	}

	do {
		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
			return (EEXIST);
		if (dir > 0) {
			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
			goto out;
		}
		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
			goto out;
		}
		lbrt = LIST_NEXT(lbrt, brt_hash);
	} while (lbrt != NULL);

#ifdef DIAGNOSTIC
	panic("bridge_rtnode_insert: impossible");
#endif

out:
	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
	sc->sc_brtcnt++;

	return (0);
}

/*
 * bridge_rtnode_destroy:
 *
 *	Destroy a bridge rtnode.
 */
static void
bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
{
	BRIDGE_LOCK_ASSERT(sc);

	LIST_REMOVE(brt, brt_hash);

	LIST_REMOVE(brt, brt_list);
	sc->sc_brtcnt--;
	brt->brt_dst->bif_addrcnt--;
	uma_zfree(bridge_rtnode_zone, brt);
}
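
/*
 * Illustrative sketch (not compiled): how a destination address is mapped
 * to a hash chain and why the chain walk above can stop early.  The six
 * address bytes are folded into two 32-bit words, mixed with the per-bridge
 * random key sc_rthash_key (Jenkins mix), and masked down to one of
 * BRIDGE_RTHASH_SIZE buckets.  Within a bucket the entries are kept sorted
 * by address, largest first (see bridge_rtnode_insert()), so a comparison
 * that returns "greater than" proves the address is not in the chain.
 * The example address is arbitrary.
 */
#if 0
	static const uint8_t example[ETHER_ADDR_LEN] =
	    { 0x00, 0x25, 0x90, 0x01, 0x02, 0x03 };
	uint32_t bucket;
	int dir;

	bucket = bridge_rthash(sc, example);	/* 0 .. BRIDGE_RTHASH_SIZE-1 */
	LIST_FOREACH(brt, &sc->sc_rthash[bucket], brt_hash) {
		dir = bridge_rtnode_addr_cmp(example, brt->brt_addr);
		if (dir == 0)
			break;			/* found (modulo vlan check) */
		if (dir > 0) {
			brt = NULL;		/* chain is sorted: not present */
			break;
		}
	}
#endif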

/*
 * bridge_rtable_expire:
 *
 *	Set the expiry time for all routes on an interface.
 */
static void
bridge_rtable_expire(struct ifnet *ifp, int age)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_rtnode *brt;

	BRIDGE_LOCK(sc);

	/*
	 * If the age is zero then flush, otherwise set all the expiry times
	 * to age for the interface.
	 */
	if (age == 0)
		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
	else {
		LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
			/* Cap the expiry time to 'age' */
			if (brt->brt_ifp == ifp &&
			    brt->brt_expire > time_uptime + age &&
			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
				brt->brt_expire = time_uptime + age;
		}
	}
	BRIDGE_UNLOCK(sc);
}

/*
 * bridge_state_change:
 *
 *	Callback from the bridgestp code when a port changes states.
 */
static void
bridge_state_change(struct ifnet *ifp, int state)
{
	struct bridge_softc *sc = ifp->if_bridge;
	static const char *stpstates[] = {
		"disabled",
		"listening",
		"learning",
		"forwarding",
		"blocking",
		"discarding"
	};

	if (log_stp)
		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
}
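
/*
 * Worked example for bridge_rtable_expire(): with the default 20 minute
 * timeout, an entry learned 30 seconds ago still has roughly 1170 seconds
 * of lifetime left (brt_expire = time_uptime + 1170).  A call with
 * age = 15 lowers that deadline to time_uptime + 15, so the address is
 * relearned quickly, while an entry that would already expire sooner than
 * 15 seconds is left untouched.  A call with age = 0 simply flushes the
 * dynamic entries for the interface.
 */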

/*
 * Send bridge packets through pfil if they are one of the types pfil can deal
 * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
 * question.)  If *bifp or *ifp are NULL then packet filtering is skipped for
 * that interface.
 */
static int
bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
{
	int snap, error, i, hlen;
	struct ether_header *eh1, eh2;
	struct ip_fw_args args;
	struct ip *ip;
	struct llc llc1;
	u_int16_t ether_type;

	snap = 0;
	error = -1;	/* Default error if not error == 0 */

#if 0
	/* we may return with the IP fields swapped, ensure it's not shared */
	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
#endif

	if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
		return (0); /* filtering is disabled */

	i = min((*mp)->m_pkthdr.len, max_protohdr);
	if ((*mp)->m_len < i) {
		*mp = m_pullup(*mp, i);
		if (*mp == NULL) {
			printf("%s: m_pullup failed\n", __func__);
			return (-1);
		}
	}

	eh1 = mtod(*mp, struct ether_header *);
	ether_type = ntohs(eh1->ether_type);

	/*
	 * Check for SNAP/LLC.
	 */
	if (ether_type < ETHERMTU) {
		struct llc *llc2 = (struct llc *)(eh1 + 1);

		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
		    llc2->llc_dsap == LLC_SNAP_LSAP &&
		    llc2->llc_ssap == LLC_SNAP_LSAP &&
		    llc2->llc_control == LLC_UI) {
			ether_type = htons(llc2->llc_un.type_snap.ether_type);
			snap = 1;
		}
	}

	/*
	 * If we're trying to filter bridge traffic, don't look at anything
	 * other than IP and ARP traffic.  If the filter doesn't understand
	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
	 * but of course we don't have an AppleTalk filter to begin with.
	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
	 * ARP traffic.)
	 */
	switch (ether_type) {
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
		if (pfil_ipfw_arp == 0)
			return (0); /* Automatically pass */
		break;

	case ETHERTYPE_IP:
#ifdef INET6
	case ETHERTYPE_IPV6:
#endif /* INET6 */
		break;
	default:
		/*
		 * Check to see if the user wants to pass non-IP packets;
		 * these will not be checked by pfil(9) and passed
		 * unconditionally, so the default is to drop.
		 */
		if (pfil_onlyip)
			goto bad;
	}

	/* Strip off the Ethernet header and keep a copy. */
	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
	m_adj(*mp, ETHER_HDR_LEN);

	/* Strip off snap header, if present */
	if (snap) {
		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
		m_adj(*mp, sizeof(struct llc));
	}

	/*
	 * Check the IP header for alignment and errors
	 */
	if (dir == PFIL_IN) {
		switch (ether_type) {
		case ETHERTYPE_IP:
			error = bridge_ip_checkbasic(mp);
			break;
#ifdef INET6
		case ETHERTYPE_IPV6:
			error = bridge_ip6_checkbasic(mp);
			break;
#endif /* INET6 */
		default:
			error = 0;
		}
		if (error)
			goto bad;
	}

	if (ip_fw_chk_ptr && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
		struct dn_pkt_tag *dn_tag;

		error = -1;
		dn_tag = ip_dn_claim_tag(*mp);
		if (dn_tag != NULL) {
			if (dn_tag->rule != NULL && V_fw_one_pass)
				/* packet already partially processed */
				goto ipfwpass;
			args.rule = dn_tag->rule;   /* matching rule to restart */
			args.rule_id = dn_tag->rule_id;
			args.chain_id = dn_tag->chain_id;
		} else
			args.rule = NULL;

		args.m = *mp;
		args.oif = ifp;
		args.next_hop = NULL;
		args.eh = &eh2;
		args.inp = NULL;	/* used by ipfw uid/gid/jail rules */
		i = ip_fw_chk_ptr(&args);
		*mp = args.m;

		if (*mp == NULL)
			return (error);

		if (ip_dn_io_ptr && (i == IP_FW_DUMMYNET)) {

			/* put the Ethernet header back on */
			M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
			if (*mp == NULL)
				return (error);
			bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);

			/*
			 * Pass the pkt to dummynet, which consumes it.  The
			 * packet will return to us via bridge_dummynet().
			 */
			args.oif = ifp;
			ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args);
			return (error);
		}

		if (i != IP_FW_PASS) /* drop */
			goto bad;
	}

ipfwpass:
	error = 0;

	/*
	 * Run the packet through pfil
	 */
	switch (ether_type) {
	case ETHERTYPE_IP:
		/*
		 * Before calling the firewall, swap fields the same as
		 * IP does.  Here we assume the header is contiguous.
		 */
		ip = mtod(*mp, struct ip *);

		ip->ip_len = ntohs(ip->ip_len);
		ip->ip_off = ntohs(ip->ip_off);

		/*
		 * Run pfil on the member interface and the bridge, both can
		 * be skipped by clearing pfil_member or pfil_bridge.
		 *
		 * Keep the order:
		 *   in_if -> bridge_if -> out_if
		 */
		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		if (pfil_member && ifp != NULL)
			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		/* check if we need to fragment the packet */
		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
			i = (*mp)->m_pkthdr.len;
			if (i > ifp->if_mtu) {
				error = bridge_fragment(ifp, *mp, &eh2, snap,
				    &llc1);
				return (error);
			}
		}

		/* Recalculate the ip checksum and restore byte ordering */
		ip = mtod(*mp, struct ip *);
		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip))
			goto bad;
		if (hlen > (*mp)->m_len) {
			if ((*mp = m_pullup(*mp, hlen)) == 0)
				goto bad;
			ip = mtod(*mp, struct ip *);
			if (ip == NULL)
				goto bad;
		}
		ip->ip_len = htons(ip->ip_len);
		ip->ip_off = htons(ip->ip_off);
		ip->ip_sum = 0;
		if (hlen == sizeof(struct ip))
			ip->ip_sum = in_cksum_hdr(ip);
		else
			ip->ip_sum = in_cksum(*mp, hlen);

		break;
#ifdef INET6
	case ETHERTYPE_IPV6:
		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		if (pfil_member && ifp != NULL)
			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
			    dir, NULL);

		if (*mp == NULL || error != 0) /* filter may consume */
			break;

		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
			    dir, NULL);
		break;
#endif
	default:
		error = 0;
		break;
	}

	if (*mp == NULL)
		return (error);
	if (error != 0)
		goto bad;

	error = -1;

	/*
	 * Finally, put everything back the way it was and return
	 */
	if (snap) {
		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
		if (*mp == NULL)
			return (error);
		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
	}

	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
	if (*mp == NULL)
		return (error);
	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);

	return (0);

bad:
	m_freem(*mp);
	*mp = NULL;
	return (error);
}
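
/*
 * Illustrative note on the hook ordering in bridge_pfil() above: a frame
 * that enters on member em0 and leaves on member em1 with both pfil_member
 * and pfil_bridge enabled is offered to the inet pfil hooks up to four
 * times, always keeping the in_if -> bridge_if -> out_if order:
 *
 *	PFIL_IN  on em0        (member, inbound)
 *	PFIL_IN  on bridge0    (bridge, inbound)
 *	PFIL_OUT on bridge0    (bridge, outbound)
 *	PFIL_OUT on em1        (member, outbound)
 *
 * The interface names are only examples.  bridge_broadcast() passes a NULL
 * bifp for the per-member PFIL_OUT pass so the bridge interface is not
 * re-filtered once per member.
 */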

/*
 * Perform basic checks on header size since
 * pfil assumes ip_input has already processed
 * it for it.  Cut-and-pasted from ip_input.c.
 * Given how simple the IPv6 version is,
 * does the IPv4 version really need to be
 * this complicated?
 *
 * XXX Should we update ipstat here, or not?
 * XXX Right now we update ipstat but not
 * XXX csum_counter.
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (*mp == NULL)
		return (-1);

	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL) goto bad;

	if (ip->ip_v != IPVERSION) {
		KMOD_IPSTAT_INC(ips_badvers);
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) { /* minimum header length */
		KMOD_IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == 0) {
			KMOD_IPSTAT_INC(ips_badhlen);
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL) goto bad;
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		KMOD_IPSTAT_INC(ips_badsum);
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		KMOD_IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		KMOD_IPSTAT_INC(ips_tooshort);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}
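
/*
 * Illustrative sketch (not compiled): the checksum shortcut used above.
 * When the receiving NIC has already verified the IP header checksum it
 * sets CSUM_IP_CHECKED (and CSUM_IP_VALID if it was correct) in
 * m_pkthdr.csum_flags, so the software in_cksum_hdr()/in_cksum() pass can
 * be skipped entirely.
 */
#if 0
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED)
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);  /* 0 == good */
	else
		sum = in_cksum_hdr(ip);		/* fall back to software */
#endif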

#ifdef INET6
/*
 * Same as above, but for IPv6.
 * Cut-and-pasted from ip6_input.c.
 * XXX Should we update ip6stat, or not?
 */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
	 */
	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			V_ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			V_ip6stat.ip6s_toosmall++;
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		V_ip6stat.ip6s_badvers++;
		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}
#endif /* INET6 */

/*
 * bridge_fragment:
 *
 *	Return a fragmented mbuf chain.
 */
static int
bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
    int snap, struct llc *llc)
{
	struct mbuf *m0;
	struct ip *ip;
	int error = -1;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		goto out;
	ip = mtod(m, struct ip *);

	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
	    CSUM_DELAY_IP);
	if (error)
		goto out;

	/* walk the chain and re-add the Ethernet header */
	for (m0 = m; m0; m0 = m0->m_nextpkt) {
		if (error == 0) {
			if (snap) {
				M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT);
				if (m0 == NULL) {
					error = ENOBUFS;
					continue;
				}
				bcopy(llc, mtod(m0, caddr_t),
				    sizeof(struct llc));
			}
			M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT);
			if (m0 == NULL) {
				error = ENOBUFS;
				continue;
			}
			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
		} else
			m_freem(m);
	}

	if (error == 0)
		KMOD_IPSTAT_INC(ips_fragmented);

	return (error);

out:
	if (m != NULL)
		m_freem(m);
	return (error);
}
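
/*
 * Illustrative sketch (not compiled): where bridge_fragment() fits.  When
 * an IPv4 packet that passed the output filter is larger than the member's
 * MTU, bridge_pfil() hands it to bridge_fragment(), which uses
 * ip_fragment() to split it into a chain linked through m_nextpkt and then
 * prepends the saved Ethernet (and optional SNAP) header onto each
 * fragment.  This chain is what the "We may be sending a fragment" loop in
 * bridge_enqueue() traverses.  The MTU value is only an example.
 */
#if 0
	if (pfil_member && ifp != NULL && dir == PFIL_OUT &&
	    (*mp)->m_pkthdr.len > ifp->if_mtu)	/* e.g. a 1500 byte MTU */
		return (bridge_fragment(ifp, *mp, &eh2, snap, &llc1));
#endif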