1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net) 40 * All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 53 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 54 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 55 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 56 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 57 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 59 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 60 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 61 * POSSIBILITY OF SUCH DAMAGE. 62 * 63 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp 64 */ 65 66 /* 67 * Network interface bridge support. 68 * 69 * TODO: 70 * 71 * - Currently only supports Ethernet-like interfaces (Ethernet, 72 * 802.11, VLANs on Ethernet, etc.) Figure out a nice way 73 * to bridge other types of interfaces (FDDI-FDDI, and maybe 74 * consider heterogenous bridges). 75 */ 76 77 #include <sys/cdefs.h> 78 __FBSDID("$FreeBSD$"); 79 80 #include "opt_inet.h" 81 #include "opt_inet6.h" 82 83 #include <sys/param.h> 84 #include <sys/mbuf.h> 85 #include <sys/malloc.h> 86 #include <sys/protosw.h> 87 #include <sys/systm.h> 88 #include <sys/time.h> 89 #include <sys/socket.h> /* for net/if.h */ 90 #include <sys/sockio.h> 91 #include <sys/ctype.h> /* string functions */ 92 #include <sys/kernel.h> 93 #include <sys/random.h> 94 #include <sys/syslog.h> 95 #include <sys/sysctl.h> 96 #include <vm/uma.h> 97 #include <sys/module.h> 98 #include <sys/priv.h> 99 #include <sys/proc.h> 100 #include <sys/lock.h> 101 #include <sys/mutex.h> 102 #include <sys/rwlock.h> 103 104 #include <net/bpf.h> 105 #include <net/if.h> 106 #include <net/if_clone.h> 107 #include <net/if_dl.h> 108 #include <net/if_types.h> 109 #include <net/if_var.h> 110 #include <net/pfil.h> 111 #include <net/vnet.h> 112 113 #include <netinet/in.h> /* for struct arpcom */ 114 #include <netinet/in_systm.h> 115 #include <netinet/in_var.h> 116 #include <netinet/ip.h> 117 #include <netinet/ip_var.h> 118 #ifdef INET6 119 #include <netinet/ip6.h> 120 #include <netinet6/ip6_var.h> 121 #endif 122 #if defined(INET) || defined(INET6) 123 #include <netinet/ip_carp.h> 124 #endif 125 #include <machine/in_cksum.h> 126 #include <netinet/if_ether.h> /* for struct arpcom */ 127 #include <net/bridgestp.h> 128 #include <net/if_bridgevar.h> 129 #include <net/if_llc.h> 130 #include <net/if_vlan_var.h> 131 132 #include <net/route.h> 133 #include <netinet/ip_fw.h> 134 #include <netinet/ipfw/ip_fw_private.h> 135 136 /* 137 * Size of the route hash table. Must be a power of two. 138 */ 139 #ifndef BRIDGE_RTHASH_SIZE 140 #define BRIDGE_RTHASH_SIZE 1024 141 #endif 142 143 #define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1) 144 145 /* 146 * Maximum number of addresses to cache. 147 */ 148 #ifndef BRIDGE_RTABLE_MAX 149 #define BRIDGE_RTABLE_MAX 100 150 #endif 151 152 /* 153 * Timeout (in seconds) for entries learned dynamically. 154 */ 155 #ifndef BRIDGE_RTABLE_TIMEOUT 156 #define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */ 157 #endif 158 159 /* 160 * Number of seconds between walks of the route list. 161 */ 162 #ifndef BRIDGE_RTABLE_PRUNE_PERIOD 163 #define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60) 164 #endif 165 166 /* 167 * List of capabilities to possibly mask on the member interface. 168 */ 169 #define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM) 170 171 /* 172 * List of capabilities to strip 173 */ 174 #define BRIDGE_IFCAPS_STRIP IFCAP_LRO 175 176 /* 177 * Bridge interface list entry. 178 */ 179 struct bridge_iflist { 180 LIST_ENTRY(bridge_iflist) bif_next; 181 struct ifnet *bif_ifp; /* member if */ 182 struct bstp_port bif_stp; /* STP state */ 183 uint32_t bif_flags; /* member if flags */ 184 int bif_savedcaps; /* saved capabilities */ 185 uint32_t bif_addrmax; /* max # of addresses */ 186 uint32_t bif_addrcnt; /* cur. # of addresses */ 187 uint32_t bif_addrexceeded;/* # of address violations */ 188 }; 189 190 /* 191 * Bridge route node. 192 */ 193 struct bridge_rtnode { 194 LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */ 195 LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */ 196 struct bridge_iflist *brt_dst; /* destination if */ 197 unsigned long brt_expire; /* expiration time */ 198 uint8_t brt_flags; /* address flags */ 199 uint8_t brt_addr[ETHER_ADDR_LEN]; 200 uint16_t brt_vlan; /* vlan id */ 201 }; 202 #define brt_ifp brt_dst->bif_ifp 203 204 /* 205 * Software state for each bridge. 206 */ 207 struct bridge_softc { 208 struct ifnet *sc_ifp; /* make this an interface */ 209 LIST_ENTRY(bridge_softc) sc_list; 210 struct mtx sc_mtx; 211 struct cv sc_cv; 212 uint32_t sc_brtmax; /* max # of addresses */ 213 uint32_t sc_brtcnt; /* cur. # of addresses */ 214 uint32_t sc_brttimeout; /* rt timeout in seconds */ 215 struct callout sc_brcallout; /* bridge callout */ 216 uint32_t sc_iflist_ref; /* refcount for sc_iflist */ 217 uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */ 218 LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */ 219 LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */ 220 LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */ 221 uint32_t sc_rthash_key; /* key for hash */ 222 LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */ 223 struct bstp_state sc_stp; /* STP state */ 224 uint32_t sc_brtexceeded; /* # of cache drops */ 225 struct ifnet *sc_ifaddr; /* member mac copied from */ 226 u_char sc_defaddr[6]; /* Default MAC address */ 227 }; 228 229 static struct mtx bridge_list_mtx; 230 eventhandler_tag bridge_detach_cookie = NULL; 231 232 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; 233 234 uma_zone_t bridge_rtnode_zone; 235 236 static int bridge_clone_create(struct if_clone *, int, caddr_t); 237 static void bridge_clone_destroy(struct ifnet *); 238 239 static int bridge_ioctl(struct ifnet *, u_long, caddr_t); 240 static void bridge_mutecaps(struct bridge_softc *); 241 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *, 242 int); 243 static void bridge_ifdetach(void *arg __unused, struct ifnet *); 244 static void bridge_init(void *); 245 static void bridge_dummynet(struct mbuf *, struct ifnet *); 246 static void bridge_stop(struct ifnet *, int); 247 static void bridge_start(struct ifnet *); 248 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *); 249 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *, 250 struct rtentry *); 251 static void bridge_enqueue(struct bridge_softc *, struct ifnet *, 252 struct mbuf *); 253 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int); 254 255 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *, 256 struct mbuf *m); 257 258 static void bridge_timer(void *); 259 260 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, 261 struct mbuf *, int); 262 static void bridge_span(struct bridge_softc *, struct mbuf *); 263 264 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, 265 uint16_t, struct bridge_iflist *, int, uint8_t); 266 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *, 267 uint16_t); 268 static void bridge_rttrim(struct bridge_softc *); 269 static void bridge_rtage(struct bridge_softc *); 270 static void bridge_rtflush(struct bridge_softc *, int); 271 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *, 272 uint16_t); 273 274 static int bridge_rtable_init(struct bridge_softc *); 275 static void bridge_rtable_fini(struct bridge_softc *); 276 277 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *); 278 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *, 279 const uint8_t *, uint16_t); 280 static int bridge_rtnode_insert(struct bridge_softc *, 281 struct bridge_rtnode *); 282 static void bridge_rtnode_destroy(struct bridge_softc *, 283 struct bridge_rtnode *); 284 static void bridge_rtable_expire(struct ifnet *, int); 285 static void bridge_state_change(struct ifnet *, int); 286 287 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *, 288 const char *name); 289 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *, 290 struct ifnet *ifp); 291 static void bridge_delete_member(struct bridge_softc *, 292 struct bridge_iflist *, int); 293 static void bridge_delete_span(struct bridge_softc *, 294 struct bridge_iflist *); 295 296 static int bridge_ioctl_add(struct bridge_softc *, void *); 297 static int bridge_ioctl_del(struct bridge_softc *, void *); 298 static int bridge_ioctl_gifflags(struct bridge_softc *, void *); 299 static int bridge_ioctl_sifflags(struct bridge_softc *, void *); 300 static int bridge_ioctl_scache(struct bridge_softc *, void *); 301 static int bridge_ioctl_gcache(struct bridge_softc *, void *); 302 static int bridge_ioctl_gifs(struct bridge_softc *, void *); 303 static int bridge_ioctl_rts(struct bridge_softc *, void *); 304 static int bridge_ioctl_saddr(struct bridge_softc *, void *); 305 static int bridge_ioctl_sto(struct bridge_softc *, void *); 306 static int bridge_ioctl_gto(struct bridge_softc *, void *); 307 static int bridge_ioctl_daddr(struct bridge_softc *, void *); 308 static int bridge_ioctl_flush(struct bridge_softc *, void *); 309 static int bridge_ioctl_gpri(struct bridge_softc *, void *); 310 static int bridge_ioctl_spri(struct bridge_softc *, void *); 311 static int bridge_ioctl_ght(struct bridge_softc *, void *); 312 static int bridge_ioctl_sht(struct bridge_softc *, void *); 313 static int bridge_ioctl_gfd(struct bridge_softc *, void *); 314 static int bridge_ioctl_sfd(struct bridge_softc *, void *); 315 static int bridge_ioctl_gma(struct bridge_softc *, void *); 316 static int bridge_ioctl_sma(struct bridge_softc *, void *); 317 static int bridge_ioctl_sifprio(struct bridge_softc *, void *); 318 static int bridge_ioctl_sifcost(struct bridge_softc *, void *); 319 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *); 320 static int bridge_ioctl_addspan(struct bridge_softc *, void *); 321 static int bridge_ioctl_delspan(struct bridge_softc *, void *); 322 static int bridge_ioctl_gbparam(struct bridge_softc *, void *); 323 static int bridge_ioctl_grte(struct bridge_softc *, void *); 324 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *); 325 static int bridge_ioctl_sproto(struct bridge_softc *, void *); 326 static int bridge_ioctl_stxhc(struct bridge_softc *, void *); 327 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, 328 int); 329 static int bridge_ip_checkbasic(struct mbuf **mp); 330 #ifdef INET6 331 static int bridge_ip6_checkbasic(struct mbuf **mp); 332 #endif /* INET6 */ 333 static int bridge_fragment(struct ifnet *, struct mbuf *, 334 struct ether_header *, int, struct llc *); 335 336 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */ 337 #define VLANTAGOF(_m) \ 338 (_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1 339 340 static struct bstp_cb_ops bridge_ops = { 341 .bcb_state = bridge_state_change, 342 .bcb_rtage = bridge_rtable_expire 343 }; 344 345 SYSCTL_DECL(_net_link); 346 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge"); 347 348 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */ 349 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */ 350 static int pfil_member = 1; /* run pfil hooks on the member interface */ 351 static int pfil_ipfw = 0; /* layer2 filter with ipfw */ 352 static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */ 353 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for 354 locally destined packets */ 355 static int log_stp = 0; /* log STP state changes */ 356 static int bridge_inherit_mac = 0; /* share MAC with first bridge member */ 357 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW, 358 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled"); 359 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW, 360 &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2"); 361 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW, 362 &pfil_bridge, 0, "Packet filter on the bridge interface"); 363 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW, 364 &pfil_member, 0, "Packet filter on the member interface"); 365 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW, 366 &pfil_local_phys, 0, 367 "Packet filter on the physical interface for locally destined packets"); 368 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW, 369 &log_stp, 0, "Log STP state changes"); 370 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, CTLFLAG_RW, 371 &bridge_inherit_mac, 0, 372 "Inherit MAC address from the first bridge member"); 373 374 struct bridge_control { 375 int (*bc_func)(struct bridge_softc *, void *); 376 int bc_argsize; 377 int bc_flags; 378 }; 379 380 #define BC_F_COPYIN 0x01 /* copy arguments in */ 381 #define BC_F_COPYOUT 0x02 /* copy arguments out */ 382 #define BC_F_SUSER 0x04 /* do super-user check */ 383 384 const struct bridge_control bridge_control_table[] = { 385 { bridge_ioctl_add, sizeof(struct ifbreq), 386 BC_F_COPYIN|BC_F_SUSER }, 387 { bridge_ioctl_del, sizeof(struct ifbreq), 388 BC_F_COPYIN|BC_F_SUSER }, 389 390 { bridge_ioctl_gifflags, sizeof(struct ifbreq), 391 BC_F_COPYIN|BC_F_COPYOUT }, 392 { bridge_ioctl_sifflags, sizeof(struct ifbreq), 393 BC_F_COPYIN|BC_F_SUSER }, 394 395 { bridge_ioctl_scache, sizeof(struct ifbrparam), 396 BC_F_COPYIN|BC_F_SUSER }, 397 { bridge_ioctl_gcache, sizeof(struct ifbrparam), 398 BC_F_COPYOUT }, 399 400 { bridge_ioctl_gifs, sizeof(struct ifbifconf), 401 BC_F_COPYIN|BC_F_COPYOUT }, 402 { bridge_ioctl_rts, sizeof(struct ifbaconf), 403 BC_F_COPYIN|BC_F_COPYOUT }, 404 405 { bridge_ioctl_saddr, sizeof(struct ifbareq), 406 BC_F_COPYIN|BC_F_SUSER }, 407 408 { bridge_ioctl_sto, sizeof(struct ifbrparam), 409 BC_F_COPYIN|BC_F_SUSER }, 410 { bridge_ioctl_gto, sizeof(struct ifbrparam), 411 BC_F_COPYOUT }, 412 413 { bridge_ioctl_daddr, sizeof(struct ifbareq), 414 BC_F_COPYIN|BC_F_SUSER }, 415 416 { bridge_ioctl_flush, sizeof(struct ifbreq), 417 BC_F_COPYIN|BC_F_SUSER }, 418 419 { bridge_ioctl_gpri, sizeof(struct ifbrparam), 420 BC_F_COPYOUT }, 421 { bridge_ioctl_spri, sizeof(struct ifbrparam), 422 BC_F_COPYIN|BC_F_SUSER }, 423 424 { bridge_ioctl_ght, sizeof(struct ifbrparam), 425 BC_F_COPYOUT }, 426 { bridge_ioctl_sht, sizeof(struct ifbrparam), 427 BC_F_COPYIN|BC_F_SUSER }, 428 429 { bridge_ioctl_gfd, sizeof(struct ifbrparam), 430 BC_F_COPYOUT }, 431 { bridge_ioctl_sfd, sizeof(struct ifbrparam), 432 BC_F_COPYIN|BC_F_SUSER }, 433 434 { bridge_ioctl_gma, sizeof(struct ifbrparam), 435 BC_F_COPYOUT }, 436 { bridge_ioctl_sma, sizeof(struct ifbrparam), 437 BC_F_COPYIN|BC_F_SUSER }, 438 439 { bridge_ioctl_sifprio, sizeof(struct ifbreq), 440 BC_F_COPYIN|BC_F_SUSER }, 441 442 { bridge_ioctl_sifcost, sizeof(struct ifbreq), 443 BC_F_COPYIN|BC_F_SUSER }, 444 445 { bridge_ioctl_addspan, sizeof(struct ifbreq), 446 BC_F_COPYIN|BC_F_SUSER }, 447 { bridge_ioctl_delspan, sizeof(struct ifbreq), 448 BC_F_COPYIN|BC_F_SUSER }, 449 450 { bridge_ioctl_gbparam, sizeof(struct ifbropreq), 451 BC_F_COPYOUT }, 452 453 { bridge_ioctl_grte, sizeof(struct ifbrparam), 454 BC_F_COPYOUT }, 455 456 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf), 457 BC_F_COPYIN|BC_F_COPYOUT }, 458 459 { bridge_ioctl_sproto, sizeof(struct ifbrparam), 460 BC_F_COPYIN|BC_F_SUSER }, 461 462 { bridge_ioctl_stxhc, sizeof(struct ifbrparam), 463 BC_F_COPYIN|BC_F_SUSER }, 464 465 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq), 466 BC_F_COPYIN|BC_F_SUSER }, 467 468 }; 469 const int bridge_control_table_size = 470 sizeof(bridge_control_table) / sizeof(bridge_control_table[0]); 471 472 LIST_HEAD(, bridge_softc) bridge_list; 473 474 IFC_SIMPLE_DECLARE(bridge, 0); 475 476 static int 477 bridge_modevent(module_t mod, int type, void *data) 478 { 479 480 switch (type) { 481 case MOD_LOAD: 482 mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF); 483 if_clone_attach(&bridge_cloner); 484 bridge_rtnode_zone = uma_zcreate("bridge_rtnode", 485 sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL, 486 UMA_ALIGN_PTR, 0); 487 LIST_INIT(&bridge_list); 488 bridge_input_p = bridge_input; 489 bridge_output_p = bridge_output; 490 bridge_dn_p = bridge_dummynet; 491 bridge_detach_cookie = EVENTHANDLER_REGISTER( 492 ifnet_departure_event, bridge_ifdetach, NULL, 493 EVENTHANDLER_PRI_ANY); 494 break; 495 case MOD_UNLOAD: 496 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 497 bridge_detach_cookie); 498 if_clone_detach(&bridge_cloner); 499 uma_zdestroy(bridge_rtnode_zone); 500 bridge_input_p = NULL; 501 bridge_output_p = NULL; 502 bridge_dn_p = NULL; 503 mtx_destroy(&bridge_list_mtx); 504 break; 505 default: 506 return (EOPNOTSUPP); 507 } 508 return (0); 509 } 510 511 static moduledata_t bridge_mod = { 512 "if_bridge", 513 bridge_modevent, 514 0 515 }; 516 517 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 518 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1); 519 520 /* 521 * handler for net.link.bridge.pfil_ipfw 522 */ 523 static int 524 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS) 525 { 526 int enable = pfil_ipfw; 527 int error; 528 529 error = sysctl_handle_int(oidp, &enable, 0, req); 530 enable = (enable) ? 1 : 0; 531 532 if (enable != pfil_ipfw) { 533 pfil_ipfw = enable; 534 535 /* 536 * Disable pfil so that ipfw doesnt run twice, if the user 537 * really wants both then they can re-enable pfil_bridge and/or 538 * pfil_member. Also allow non-ip packets as ipfw can filter by 539 * layer2 type. 540 */ 541 if (pfil_ipfw) { 542 pfil_onlyip = 0; 543 pfil_bridge = 0; 544 pfil_member = 0; 545 } 546 } 547 548 return (error); 549 } 550 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW, 551 &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW"); 552 553 /* 554 * bridge_clone_create: 555 * 556 * Create a new bridge instance. 557 */ 558 static int 559 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params) 560 { 561 struct bridge_softc *sc, *sc2; 562 struct ifnet *bifp, *ifp; 563 int retry; 564 565 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO); 566 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 567 if (ifp == NULL) { 568 free(sc, M_DEVBUF); 569 return (ENOSPC); 570 } 571 572 BRIDGE_LOCK_INIT(sc); 573 sc->sc_brtmax = BRIDGE_RTABLE_MAX; 574 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT; 575 576 /* Initialize our routing table. */ 577 bridge_rtable_init(sc); 578 579 callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0); 580 581 LIST_INIT(&sc->sc_iflist); 582 LIST_INIT(&sc->sc_spanlist); 583 584 ifp->if_softc = sc; 585 if_initname(ifp, ifc->ifc_name, unit); 586 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 587 ifp->if_ioctl = bridge_ioctl; 588 ifp->if_start = bridge_start; 589 ifp->if_init = bridge_init; 590 ifp->if_type = IFT_BRIDGE; 591 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 592 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 593 IFQ_SET_READY(&ifp->if_snd); 594 595 /* 596 * Generate a random ethernet address with a locally administered 597 * address. 598 * 599 * Since we are using random ethernet addresses for the bridge, it is 600 * possible that we might have address collisions, so make sure that 601 * this hardware address isn't already in use on another bridge. 602 */ 603 for (retry = 1; retry != 0;) { 604 arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1); 605 sc->sc_defaddr[0] &= ~1; /* clear multicast bit */ 606 sc->sc_defaddr[0] |= 2; /* set the LAA bit */ 607 retry = 0; 608 mtx_lock(&bridge_list_mtx); 609 LIST_FOREACH(sc2, &bridge_list, sc_list) { 610 bifp = sc2->sc_ifp; 611 if (memcmp(sc->sc_defaddr, 612 IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) 613 retry = 1; 614 } 615 mtx_unlock(&bridge_list_mtx); 616 } 617 618 bstp_attach(&sc->sc_stp, &bridge_ops); 619 ether_ifattach(ifp, sc->sc_defaddr); 620 /* Now undo some of the damage... */ 621 ifp->if_baudrate = 0; 622 ifp->if_type = IFT_BRIDGE; 623 624 mtx_lock(&bridge_list_mtx); 625 LIST_INSERT_HEAD(&bridge_list, sc, sc_list); 626 mtx_unlock(&bridge_list_mtx); 627 628 return (0); 629 } 630 631 /* 632 * bridge_clone_destroy: 633 * 634 * Destroy a bridge instance. 635 */ 636 static void 637 bridge_clone_destroy(struct ifnet *ifp) 638 { 639 struct bridge_softc *sc = ifp->if_softc; 640 struct bridge_iflist *bif; 641 642 BRIDGE_LOCK(sc); 643 644 bridge_stop(ifp, 1); 645 ifp->if_flags &= ~IFF_UP; 646 647 while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL) 648 bridge_delete_member(sc, bif, 0); 649 650 while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) { 651 bridge_delete_span(sc, bif); 652 } 653 654 BRIDGE_UNLOCK(sc); 655 656 callout_drain(&sc->sc_brcallout); 657 658 mtx_lock(&bridge_list_mtx); 659 LIST_REMOVE(sc, sc_list); 660 mtx_unlock(&bridge_list_mtx); 661 662 bstp_detach(&sc->sc_stp); 663 ether_ifdetach(ifp); 664 if_free_type(ifp, IFT_ETHER); 665 666 /* Tear down the routing table. */ 667 bridge_rtable_fini(sc); 668 669 BRIDGE_LOCK_DESTROY(sc); 670 free(sc, M_DEVBUF); 671 } 672 673 /* 674 * bridge_ioctl: 675 * 676 * Handle a control request from the operator. 677 */ 678 static int 679 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 680 { 681 struct bridge_softc *sc = ifp->if_softc; 682 struct ifreq *ifr = (struct ifreq *)data; 683 struct bridge_iflist *bif; 684 struct thread *td = curthread; 685 union { 686 struct ifbreq ifbreq; 687 struct ifbifconf ifbifconf; 688 struct ifbareq ifbareq; 689 struct ifbaconf ifbaconf; 690 struct ifbrparam ifbrparam; 691 struct ifbropreq ifbropreq; 692 } args; 693 struct ifdrv *ifd = (struct ifdrv *) data; 694 const struct bridge_control *bc; 695 int error = 0; 696 697 switch (cmd) { 698 699 case SIOCADDMULTI: 700 case SIOCDELMULTI: 701 break; 702 703 case SIOCGDRVSPEC: 704 case SIOCSDRVSPEC: 705 if (ifd->ifd_cmd >= bridge_control_table_size) { 706 error = EINVAL; 707 break; 708 } 709 bc = &bridge_control_table[ifd->ifd_cmd]; 710 711 if (cmd == SIOCGDRVSPEC && 712 (bc->bc_flags & BC_F_COPYOUT) == 0) { 713 error = EINVAL; 714 break; 715 } 716 else if (cmd == SIOCSDRVSPEC && 717 (bc->bc_flags & BC_F_COPYOUT) != 0) { 718 error = EINVAL; 719 break; 720 } 721 722 if (bc->bc_flags & BC_F_SUSER) { 723 error = priv_check(td, PRIV_NET_BRIDGE); 724 if (error) 725 break; 726 } 727 728 if (ifd->ifd_len != bc->bc_argsize || 729 ifd->ifd_len > sizeof(args)) { 730 error = EINVAL; 731 break; 732 } 733 734 bzero(&args, sizeof(args)); 735 if (bc->bc_flags & BC_F_COPYIN) { 736 error = copyin(ifd->ifd_data, &args, ifd->ifd_len); 737 if (error) 738 break; 739 } 740 741 BRIDGE_LOCK(sc); 742 error = (*bc->bc_func)(sc, &args); 743 BRIDGE_UNLOCK(sc); 744 if (error) 745 break; 746 747 if (bc->bc_flags & BC_F_COPYOUT) 748 error = copyout(&args, ifd->ifd_data, ifd->ifd_len); 749 750 break; 751 752 case SIOCSIFFLAGS: 753 if (!(ifp->if_flags & IFF_UP) && 754 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 755 /* 756 * If interface is marked down and it is running, 757 * then stop and disable it. 758 */ 759 BRIDGE_LOCK(sc); 760 bridge_stop(ifp, 1); 761 BRIDGE_UNLOCK(sc); 762 } else if ((ifp->if_flags & IFF_UP) && 763 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 764 /* 765 * If interface is marked up and it is stopped, then 766 * start it. 767 */ 768 (*ifp->if_init)(sc); 769 } 770 break; 771 772 case SIOCSIFMTU: 773 if (ifr->ifr_mtu < 576) { 774 error = EINVAL; 775 break; 776 } 777 if (LIST_EMPTY(&sc->sc_iflist)) { 778 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 779 break; 780 } 781 BRIDGE_LOCK(sc); 782 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 783 if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) { 784 log(LOG_NOTICE, "%s: invalid MTU: %lu(%s)" 785 " != %d\n", sc->sc_ifp->if_xname, 786 bif->bif_ifp->if_mtu, 787 bif->bif_ifp->if_xname, ifr->ifr_mtu); 788 error = EINVAL; 789 break; 790 } 791 } 792 if (!error) 793 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 794 BRIDGE_UNLOCK(sc); 795 break; 796 default: 797 /* 798 * drop the lock as ether_ioctl() will call bridge_start() and 799 * cause the lock to be recursed. 800 */ 801 error = ether_ioctl(ifp, cmd, data); 802 break; 803 } 804 805 return (error); 806 } 807 808 /* 809 * bridge_mutecaps: 810 * 811 * Clear or restore unwanted capabilities on the member interface 812 */ 813 static void 814 bridge_mutecaps(struct bridge_softc *sc) 815 { 816 struct bridge_iflist *bif; 817 int enabled, mask; 818 819 /* Initial bitmask of capabilities to test */ 820 mask = BRIDGE_IFCAPS_MASK; 821 822 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 823 /* Every member must support it or its disabled */ 824 mask &= bif->bif_savedcaps; 825 } 826 827 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 828 enabled = bif->bif_ifp->if_capenable; 829 enabled &= ~BRIDGE_IFCAPS_STRIP; 830 /* strip off mask bits and enable them again if allowed */ 831 enabled &= ~BRIDGE_IFCAPS_MASK; 832 enabled |= mask; 833 bridge_set_ifcap(sc, bif, enabled); 834 } 835 836 } 837 838 static void 839 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set) 840 { 841 struct ifnet *ifp = bif->bif_ifp; 842 struct ifreq ifr; 843 int error; 844 845 bzero(&ifr, sizeof(ifr)); 846 ifr.ifr_reqcap = set; 847 848 if (ifp->if_capenable != set) { 849 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr); 850 if (error) 851 if_printf(sc->sc_ifp, 852 "error setting interface capabilities on %s\n", 853 ifp->if_xname); 854 } 855 } 856 857 /* 858 * bridge_lookup_member: 859 * 860 * Lookup a bridge member interface. 861 */ 862 static struct bridge_iflist * 863 bridge_lookup_member(struct bridge_softc *sc, const char *name) 864 { 865 struct bridge_iflist *bif; 866 struct ifnet *ifp; 867 868 BRIDGE_LOCK_ASSERT(sc); 869 870 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 871 ifp = bif->bif_ifp; 872 if (strcmp(ifp->if_xname, name) == 0) 873 return (bif); 874 } 875 876 return (NULL); 877 } 878 879 /* 880 * bridge_lookup_member_if: 881 * 882 * Lookup a bridge member interface by ifnet*. 883 */ 884 static struct bridge_iflist * 885 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp) 886 { 887 struct bridge_iflist *bif; 888 889 BRIDGE_LOCK_ASSERT(sc); 890 891 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 892 if (bif->bif_ifp == member_ifp) 893 return (bif); 894 } 895 896 return (NULL); 897 } 898 899 /* 900 * bridge_delete_member: 901 * 902 * Delete the specified member interface. 903 */ 904 static void 905 bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif, 906 int gone) 907 { 908 struct ifnet *ifs = bif->bif_ifp; 909 struct ifnet *fif = NULL; 910 911 BRIDGE_LOCK_ASSERT(sc); 912 913 if (bif->bif_flags & IFBIF_STP) 914 bstp_disable(&bif->bif_stp); 915 916 ifs->if_bridge = NULL; 917 BRIDGE_XLOCK(sc); 918 LIST_REMOVE(bif, bif_next); 919 BRIDGE_XDROP(sc); 920 921 /* 922 * If removing the interface that gave the bridge its mac address, set 923 * the mac address of the bridge to the address of the next member, or 924 * to its default address if no members are left. 925 */ 926 if (bridge_inherit_mac && sc->sc_ifaddr == ifs) { 927 if (LIST_EMPTY(&sc->sc_iflist)) { 928 bcopy(sc->sc_defaddr, 929 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 930 sc->sc_ifaddr = NULL; 931 } else { 932 fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp; 933 bcopy(IF_LLADDR(fif), 934 IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 935 sc->sc_ifaddr = fif; 936 } 937 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); 938 } 939 940 bridge_mutecaps(sc); /* recalcuate now this interface is removed */ 941 bridge_rtdelete(sc, ifs, IFBF_FLUSHALL); 942 KASSERT(bif->bif_addrcnt == 0, 943 ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt)); 944 945 BRIDGE_UNLOCK(sc); 946 if (!gone) { 947 switch (ifs->if_type) { 948 case IFT_ETHER: 949 case IFT_L2VLAN: 950 /* 951 * Take the interface out of promiscuous mode. 952 */ 953 (void) ifpromisc(ifs, 0); 954 break; 955 956 case IFT_GIF: 957 break; 958 959 default: 960 #ifdef DIAGNOSTIC 961 panic("bridge_delete_member: impossible"); 962 #endif 963 break; 964 } 965 /* reneable any interface capabilities */ 966 bridge_set_ifcap(sc, bif, bif->bif_savedcaps); 967 } 968 bstp_destroy(&bif->bif_stp); /* prepare to free */ 969 BRIDGE_LOCK(sc); 970 free(bif, M_DEVBUF); 971 } 972 973 /* 974 * bridge_delete_span: 975 * 976 * Delete the specified span interface. 977 */ 978 static void 979 bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif) 980 { 981 BRIDGE_LOCK_ASSERT(sc); 982 983 KASSERT(bif->bif_ifp->if_bridge == NULL, 984 ("%s: not a span interface", __func__)); 985 986 LIST_REMOVE(bif, bif_next); 987 free(bif, M_DEVBUF); 988 } 989 990 static int 991 bridge_ioctl_add(struct bridge_softc *sc, void *arg) 992 { 993 struct ifbreq *req = arg; 994 struct bridge_iflist *bif = NULL; 995 struct ifnet *ifs; 996 int error = 0; 997 998 ifs = ifunit(req->ifbr_ifsname); 999 if (ifs == NULL) 1000 return (ENOENT); 1001 if (ifs->if_ioctl == NULL) /* must be supported */ 1002 return (EINVAL); 1003 1004 /* If it's in the span list, it can't be a member. */ 1005 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1006 if (ifs == bif->bif_ifp) 1007 return (EBUSY); 1008 1009 if (ifs->if_bridge == sc) 1010 return (EEXIST); 1011 1012 if (ifs->if_bridge != NULL) 1013 return (EBUSY); 1014 1015 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1016 if (bif == NULL) 1017 return (ENOMEM); 1018 1019 bif->bif_ifp = ifs; 1020 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER; 1021 bif->bif_savedcaps = ifs->if_capenable; 1022 1023 switch (ifs->if_type) { 1024 case IFT_ETHER: 1025 case IFT_L2VLAN: 1026 case IFT_GIF: 1027 /* permitted interface types */ 1028 break; 1029 default: 1030 error = EINVAL; 1031 goto out; 1032 } 1033 1034 /* Allow the first Ethernet member to define the MTU */ 1035 if (LIST_EMPTY(&sc->sc_iflist)) 1036 sc->sc_ifp->if_mtu = ifs->if_mtu; 1037 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) { 1038 if_printf(sc->sc_ifp, "invalid MTU: %lu(%s) != %lu\n", 1039 ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu); 1040 error = EINVAL; 1041 goto out; 1042 } 1043 1044 /* 1045 * Assign the interface's MAC address to the bridge if it's the first 1046 * member and the MAC address of the bridge has not been changed from 1047 * the default randomly generated one. 1048 */ 1049 if (bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) && 1050 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) { 1051 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 1052 sc->sc_ifaddr = ifs; 1053 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); 1054 } 1055 1056 ifs->if_bridge = sc; 1057 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp); 1058 /* 1059 * XXX: XLOCK HERE!?! 1060 * 1061 * NOTE: insert_***HEAD*** should be safe for the traversals. 1062 */ 1063 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next); 1064 1065 /* Set interface capabilities to the intersection set of all members */ 1066 bridge_mutecaps(sc); 1067 1068 switch (ifs->if_type) { 1069 case IFT_ETHER: 1070 case IFT_L2VLAN: 1071 /* 1072 * Place the interface into promiscuous mode. 1073 */ 1074 BRIDGE_UNLOCK(sc); 1075 error = ifpromisc(ifs, 1); 1076 BRIDGE_LOCK(sc); 1077 break; 1078 } 1079 if (error) 1080 bridge_delete_member(sc, bif, 0); 1081 out: 1082 if (error) { 1083 if (bif != NULL) 1084 free(bif, M_DEVBUF); 1085 } 1086 return (error); 1087 } 1088 1089 static int 1090 bridge_ioctl_del(struct bridge_softc *sc, void *arg) 1091 { 1092 struct ifbreq *req = arg; 1093 struct bridge_iflist *bif; 1094 1095 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1096 if (bif == NULL) 1097 return (ENOENT); 1098 1099 bridge_delete_member(sc, bif, 0); 1100 1101 return (0); 1102 } 1103 1104 static int 1105 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) 1106 { 1107 struct ifbreq *req = arg; 1108 struct bridge_iflist *bif; 1109 struct bstp_port *bp; 1110 1111 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1112 if (bif == NULL) 1113 return (ENOENT); 1114 1115 bp = &bif->bif_stp; 1116 req->ifbr_ifsflags = bif->bif_flags; 1117 req->ifbr_state = bp->bp_state; 1118 req->ifbr_priority = bp->bp_priority; 1119 req->ifbr_path_cost = bp->bp_path_cost; 1120 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1121 req->ifbr_proto = bp->bp_protover; 1122 req->ifbr_role = bp->bp_role; 1123 req->ifbr_stpflags = bp->bp_flags; 1124 req->ifbr_addrcnt = bif->bif_addrcnt; 1125 req->ifbr_addrmax = bif->bif_addrmax; 1126 req->ifbr_addrexceeded = bif->bif_addrexceeded; 1127 1128 /* Copy STP state options as flags */ 1129 if (bp->bp_operedge) 1130 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE; 1131 if (bp->bp_flags & BSTP_PORT_AUTOEDGE) 1132 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE; 1133 if (bp->bp_ptp_link) 1134 req->ifbr_ifsflags |= IFBIF_BSTP_PTP; 1135 if (bp->bp_flags & BSTP_PORT_AUTOPTP) 1136 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP; 1137 if (bp->bp_flags & BSTP_PORT_ADMEDGE) 1138 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE; 1139 if (bp->bp_flags & BSTP_PORT_ADMCOST) 1140 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST; 1141 return (0); 1142 } 1143 1144 static int 1145 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg) 1146 { 1147 struct ifbreq *req = arg; 1148 struct bridge_iflist *bif; 1149 struct bstp_port *bp; 1150 int error; 1151 1152 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1153 if (bif == NULL) 1154 return (ENOENT); 1155 bp = &bif->bif_stp; 1156 1157 if (req->ifbr_ifsflags & IFBIF_SPAN) 1158 /* SPAN is readonly */ 1159 return (EINVAL); 1160 1161 if (req->ifbr_ifsflags & IFBIF_STP) { 1162 if ((bif->bif_flags & IFBIF_STP) == 0) { 1163 error = bstp_enable(&bif->bif_stp); 1164 if (error) 1165 return (error); 1166 } 1167 } else { 1168 if ((bif->bif_flags & IFBIF_STP) != 0) 1169 bstp_disable(&bif->bif_stp); 1170 } 1171 1172 /* Pass on STP flags */ 1173 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0); 1174 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0); 1175 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0); 1176 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0); 1177 1178 /* Save the bits relating to the bridge */ 1179 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK; 1180 1181 return (0); 1182 } 1183 1184 static int 1185 bridge_ioctl_scache(struct bridge_softc *sc, void *arg) 1186 { 1187 struct ifbrparam *param = arg; 1188 1189 sc->sc_brtmax = param->ifbrp_csize; 1190 bridge_rttrim(sc); 1191 1192 return (0); 1193 } 1194 1195 static int 1196 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg) 1197 { 1198 struct ifbrparam *param = arg; 1199 1200 param->ifbrp_csize = sc->sc_brtmax; 1201 1202 return (0); 1203 } 1204 1205 static int 1206 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg) 1207 { 1208 struct ifbifconf *bifc = arg; 1209 struct bridge_iflist *bif; 1210 struct ifbreq breq; 1211 char *buf, *outbuf; 1212 int count, buflen, len, error = 0; 1213 1214 count = 0; 1215 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) 1216 count++; 1217 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1218 count++; 1219 1220 buflen = sizeof(breq) * count; 1221 if (bifc->ifbic_len == 0) { 1222 bifc->ifbic_len = buflen; 1223 return (0); 1224 } 1225 BRIDGE_UNLOCK(sc); 1226 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1227 BRIDGE_LOCK(sc); 1228 1229 count = 0; 1230 buf = outbuf; 1231 len = min(bifc->ifbic_len, buflen); 1232 bzero(&breq, sizeof(breq)); 1233 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1234 if (len < sizeof(breq)) 1235 break; 1236 1237 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1238 sizeof(breq.ifbr_ifsname)); 1239 /* Fill in the ifbreq structure */ 1240 error = bridge_ioctl_gifflags(sc, &breq); 1241 if (error) 1242 break; 1243 memcpy(buf, &breq, sizeof(breq)); 1244 count++; 1245 buf += sizeof(breq); 1246 len -= sizeof(breq); 1247 } 1248 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 1249 if (len < sizeof(breq)) 1250 break; 1251 1252 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1253 sizeof(breq.ifbr_ifsname)); 1254 breq.ifbr_ifsflags = bif->bif_flags; 1255 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1256 memcpy(buf, &breq, sizeof(breq)); 1257 count++; 1258 buf += sizeof(breq); 1259 len -= sizeof(breq); 1260 } 1261 1262 BRIDGE_UNLOCK(sc); 1263 bifc->ifbic_len = sizeof(breq) * count; 1264 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); 1265 BRIDGE_LOCK(sc); 1266 free(outbuf, M_TEMP); 1267 return (error); 1268 } 1269 1270 static int 1271 bridge_ioctl_rts(struct bridge_softc *sc, void *arg) 1272 { 1273 struct ifbaconf *bac = arg; 1274 struct bridge_rtnode *brt; 1275 struct ifbareq bareq; 1276 char *buf, *outbuf; 1277 int count, buflen, len, error = 0; 1278 1279 if (bac->ifbac_len == 0) 1280 return (0); 1281 1282 count = 0; 1283 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) 1284 count++; 1285 buflen = sizeof(bareq) * count; 1286 1287 BRIDGE_UNLOCK(sc); 1288 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1289 BRIDGE_LOCK(sc); 1290 1291 count = 0; 1292 buf = outbuf; 1293 len = min(bac->ifbac_len, buflen); 1294 bzero(&bareq, sizeof(bareq)); 1295 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 1296 if (len < sizeof(bareq)) 1297 goto out; 1298 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname, 1299 sizeof(bareq.ifba_ifsname)); 1300 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); 1301 bareq.ifba_vlan = brt->brt_vlan; 1302 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 1303 time_uptime < brt->brt_expire) 1304 bareq.ifba_expire = brt->brt_expire - time_uptime; 1305 else 1306 bareq.ifba_expire = 0; 1307 bareq.ifba_flags = brt->brt_flags; 1308 1309 memcpy(buf, &bareq, sizeof(bareq)); 1310 count++; 1311 buf += sizeof(bareq); 1312 len -= sizeof(bareq); 1313 } 1314 out: 1315 BRIDGE_UNLOCK(sc); 1316 bac->ifbac_len = sizeof(bareq) * count; 1317 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); 1318 BRIDGE_LOCK(sc); 1319 free(outbuf, M_TEMP); 1320 return (error); 1321 } 1322 1323 static int 1324 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg) 1325 { 1326 struct ifbareq *req = arg; 1327 struct bridge_iflist *bif; 1328 int error; 1329 1330 bif = bridge_lookup_member(sc, req->ifba_ifsname); 1331 if (bif == NULL) 1332 return (ENOENT); 1333 1334 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1, 1335 req->ifba_flags); 1336 1337 return (error); 1338 } 1339 1340 static int 1341 bridge_ioctl_sto(struct bridge_softc *sc, void *arg) 1342 { 1343 struct ifbrparam *param = arg; 1344 1345 sc->sc_brttimeout = param->ifbrp_ctime; 1346 return (0); 1347 } 1348 1349 static int 1350 bridge_ioctl_gto(struct bridge_softc *sc, void *arg) 1351 { 1352 struct ifbrparam *param = arg; 1353 1354 param->ifbrp_ctime = sc->sc_brttimeout; 1355 return (0); 1356 } 1357 1358 static int 1359 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg) 1360 { 1361 struct ifbareq *req = arg; 1362 1363 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan)); 1364 } 1365 1366 static int 1367 bridge_ioctl_flush(struct bridge_softc *sc, void *arg) 1368 { 1369 struct ifbreq *req = arg; 1370 1371 bridge_rtflush(sc, req->ifbr_ifsflags); 1372 return (0); 1373 } 1374 1375 static int 1376 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg) 1377 { 1378 struct ifbrparam *param = arg; 1379 struct bstp_state *bs = &sc->sc_stp; 1380 1381 param->ifbrp_prio = bs->bs_bridge_priority; 1382 return (0); 1383 } 1384 1385 static int 1386 bridge_ioctl_spri(struct bridge_softc *sc, void *arg) 1387 { 1388 struct ifbrparam *param = arg; 1389 1390 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio)); 1391 } 1392 1393 static int 1394 bridge_ioctl_ght(struct bridge_softc *sc, void *arg) 1395 { 1396 struct ifbrparam *param = arg; 1397 struct bstp_state *bs = &sc->sc_stp; 1398 1399 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8; 1400 return (0); 1401 } 1402 1403 static int 1404 bridge_ioctl_sht(struct bridge_softc *sc, void *arg) 1405 { 1406 struct ifbrparam *param = arg; 1407 1408 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime)); 1409 } 1410 1411 static int 1412 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg) 1413 { 1414 struct ifbrparam *param = arg; 1415 struct bstp_state *bs = &sc->sc_stp; 1416 1417 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8; 1418 return (0); 1419 } 1420 1421 static int 1422 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg) 1423 { 1424 struct ifbrparam *param = arg; 1425 1426 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay)); 1427 } 1428 1429 static int 1430 bridge_ioctl_gma(struct bridge_softc *sc, void *arg) 1431 { 1432 struct ifbrparam *param = arg; 1433 struct bstp_state *bs = &sc->sc_stp; 1434 1435 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8; 1436 return (0); 1437 } 1438 1439 static int 1440 bridge_ioctl_sma(struct bridge_softc *sc, void *arg) 1441 { 1442 struct ifbrparam *param = arg; 1443 1444 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage)); 1445 } 1446 1447 static int 1448 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg) 1449 { 1450 struct ifbreq *req = arg; 1451 struct bridge_iflist *bif; 1452 1453 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1454 if (bif == NULL) 1455 return (ENOENT); 1456 1457 return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority)); 1458 } 1459 1460 static int 1461 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg) 1462 { 1463 struct ifbreq *req = arg; 1464 struct bridge_iflist *bif; 1465 1466 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1467 if (bif == NULL) 1468 return (ENOENT); 1469 1470 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost)); 1471 } 1472 1473 static int 1474 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg) 1475 { 1476 struct ifbreq *req = arg; 1477 struct bridge_iflist *bif; 1478 1479 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1480 if (bif == NULL) 1481 return (ENOENT); 1482 1483 bif->bif_addrmax = req->ifbr_addrmax; 1484 return (0); 1485 } 1486 1487 static int 1488 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg) 1489 { 1490 struct ifbreq *req = arg; 1491 struct bridge_iflist *bif = NULL; 1492 struct ifnet *ifs; 1493 1494 ifs = ifunit(req->ifbr_ifsname); 1495 if (ifs == NULL) 1496 return (ENOENT); 1497 1498 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1499 if (ifs == bif->bif_ifp) 1500 return (EBUSY); 1501 1502 if (ifs->if_bridge != NULL) 1503 return (EBUSY); 1504 1505 switch (ifs->if_type) { 1506 case IFT_ETHER: 1507 case IFT_GIF: 1508 case IFT_L2VLAN: 1509 break; 1510 default: 1511 return (EINVAL); 1512 } 1513 1514 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1515 if (bif == NULL) 1516 return (ENOMEM); 1517 1518 bif->bif_ifp = ifs; 1519 bif->bif_flags = IFBIF_SPAN; 1520 1521 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next); 1522 1523 return (0); 1524 } 1525 1526 static int 1527 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg) 1528 { 1529 struct ifbreq *req = arg; 1530 struct bridge_iflist *bif; 1531 struct ifnet *ifs; 1532 1533 ifs = ifunit(req->ifbr_ifsname); 1534 if (ifs == NULL) 1535 return (ENOENT); 1536 1537 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1538 if (ifs == bif->bif_ifp) 1539 break; 1540 1541 if (bif == NULL) 1542 return (ENOENT); 1543 1544 bridge_delete_span(sc, bif); 1545 1546 return (0); 1547 } 1548 1549 static int 1550 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg) 1551 { 1552 struct ifbropreq *req = arg; 1553 struct bstp_state *bs = &sc->sc_stp; 1554 struct bstp_port *root_port; 1555 1556 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; 1557 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; 1558 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; 1559 1560 root_port = bs->bs_root_port; 1561 if (root_port == NULL) 1562 req->ifbop_root_port = 0; 1563 else 1564 req->ifbop_root_port = root_port->bp_ifp->if_index; 1565 1566 req->ifbop_holdcount = bs->bs_txholdcount; 1567 req->ifbop_priority = bs->bs_bridge_priority; 1568 req->ifbop_protocol = bs->bs_protover; 1569 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; 1570 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; 1571 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; 1572 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; 1573 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; 1574 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; 1575 1576 return (0); 1577 } 1578 1579 static int 1580 bridge_ioctl_grte(struct bridge_softc *sc, void *arg) 1581 { 1582 struct ifbrparam *param = arg; 1583 1584 param->ifbrp_cexceeded = sc->sc_brtexceeded; 1585 return (0); 1586 } 1587 1588 static int 1589 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg) 1590 { 1591 struct ifbpstpconf *bifstp = arg; 1592 struct bridge_iflist *bif; 1593 struct bstp_port *bp; 1594 struct ifbpstpreq bpreq; 1595 char *buf, *outbuf; 1596 int count, buflen, len, error = 0; 1597 1598 count = 0; 1599 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1600 if ((bif->bif_flags & IFBIF_STP) != 0) 1601 count++; 1602 } 1603 1604 buflen = sizeof(bpreq) * count; 1605 if (bifstp->ifbpstp_len == 0) { 1606 bifstp->ifbpstp_len = buflen; 1607 return (0); 1608 } 1609 1610 BRIDGE_UNLOCK(sc); 1611 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1612 BRIDGE_LOCK(sc); 1613 1614 count = 0; 1615 buf = outbuf; 1616 len = min(bifstp->ifbpstp_len, buflen); 1617 bzero(&bpreq, sizeof(bpreq)); 1618 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1619 if (len < sizeof(bpreq)) 1620 break; 1621 1622 if ((bif->bif_flags & IFBIF_STP) == 0) 1623 continue; 1624 1625 bp = &bif->bif_stp; 1626 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; 1627 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; 1628 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; 1629 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; 1630 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; 1631 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; 1632 1633 memcpy(buf, &bpreq, sizeof(bpreq)); 1634 count++; 1635 buf += sizeof(bpreq); 1636 len -= sizeof(bpreq); 1637 } 1638 1639 BRIDGE_UNLOCK(sc); 1640 bifstp->ifbpstp_len = sizeof(bpreq) * count; 1641 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); 1642 BRIDGE_LOCK(sc); 1643 free(outbuf, M_TEMP); 1644 return (error); 1645 } 1646 1647 static int 1648 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg) 1649 { 1650 struct ifbrparam *param = arg; 1651 1652 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto)); 1653 } 1654 1655 static int 1656 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg) 1657 { 1658 struct ifbrparam *param = arg; 1659 1660 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc)); 1661 } 1662 1663 /* 1664 * bridge_ifdetach: 1665 * 1666 * Detach an interface from a bridge. Called when a member 1667 * interface is detaching. 1668 */ 1669 static void 1670 bridge_ifdetach(void *arg __unused, struct ifnet *ifp) 1671 { 1672 struct bridge_softc *sc = ifp->if_bridge; 1673 struct bridge_iflist *bif; 1674 1675 /* Check if the interface is a bridge member */ 1676 if (sc != NULL) { 1677 BRIDGE_LOCK(sc); 1678 1679 bif = bridge_lookup_member_if(sc, ifp); 1680 if (bif != NULL) 1681 bridge_delete_member(sc, bif, 1); 1682 1683 BRIDGE_UNLOCK(sc); 1684 return; 1685 } 1686 1687 /* Check if the interface is a span port */ 1688 mtx_lock(&bridge_list_mtx); 1689 LIST_FOREACH(sc, &bridge_list, sc_list) { 1690 BRIDGE_LOCK(sc); 1691 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1692 if (ifp == bif->bif_ifp) { 1693 bridge_delete_span(sc, bif); 1694 break; 1695 } 1696 1697 BRIDGE_UNLOCK(sc); 1698 } 1699 mtx_unlock(&bridge_list_mtx); 1700 } 1701 1702 /* 1703 * bridge_init: 1704 * 1705 * Initialize a bridge interface. 1706 */ 1707 static void 1708 bridge_init(void *xsc) 1709 { 1710 struct bridge_softc *sc = (struct bridge_softc *)xsc; 1711 struct ifnet *ifp = sc->sc_ifp; 1712 1713 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1714 return; 1715 1716 BRIDGE_LOCK(sc); 1717 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz, 1718 bridge_timer, sc); 1719 1720 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1721 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */ 1722 1723 BRIDGE_UNLOCK(sc); 1724 } 1725 1726 /* 1727 * bridge_stop: 1728 * 1729 * Stop the bridge interface. 1730 */ 1731 static void 1732 bridge_stop(struct ifnet *ifp, int disable) 1733 { 1734 struct bridge_softc *sc = ifp->if_softc; 1735 1736 BRIDGE_LOCK_ASSERT(sc); 1737 1738 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1739 return; 1740 1741 callout_stop(&sc->sc_brcallout); 1742 bstp_stop(&sc->sc_stp); 1743 1744 bridge_rtflush(sc, IFBF_FLUSHDYN); 1745 1746 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1747 } 1748 1749 /* 1750 * bridge_enqueue: 1751 * 1752 * Enqueue a packet on a bridge member interface. 1753 * 1754 */ 1755 static void 1756 bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m) 1757 { 1758 int len, err = 0; 1759 short mflags; 1760 struct mbuf *m0; 1761 1762 len = m->m_pkthdr.len; 1763 mflags = m->m_flags; 1764 1765 /* We may be sending a fragment so traverse the mbuf */ 1766 for (; m; m = m0) { 1767 m0 = m->m_nextpkt; 1768 m->m_nextpkt = NULL; 1769 1770 /* 1771 * If underlying interface can not do VLAN tag insertion itself 1772 * then attach a packet tag that holds it. 1773 */ 1774 if ((m->m_flags & M_VLANTAG) && 1775 (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) { 1776 m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); 1777 if (m == NULL) { 1778 if_printf(dst_ifp, 1779 "unable to prepend VLAN header\n"); 1780 dst_ifp->if_oerrors++; 1781 continue; 1782 } 1783 m->m_flags &= ~M_VLANTAG; 1784 } 1785 1786 if (err == 0) 1787 dst_ifp->if_transmit(dst_ifp, m); 1788 } 1789 1790 if (err == 0) { 1791 sc->sc_ifp->if_opackets++; 1792 sc->sc_ifp->if_obytes += len; 1793 if (mflags & M_MCAST) 1794 sc->sc_ifp->if_omcasts++; 1795 } 1796 } 1797 1798 /* 1799 * bridge_dummynet: 1800 * 1801 * Receive a queued packet from dummynet and pass it on to the output 1802 * interface. 1803 * 1804 * The mbuf has the Ethernet header already attached. 1805 */ 1806 static void 1807 bridge_dummynet(struct mbuf *m, struct ifnet *ifp) 1808 { 1809 struct bridge_softc *sc; 1810 1811 sc = ifp->if_bridge; 1812 1813 /* 1814 * The packet didnt originate from a member interface. This should only 1815 * ever happen if a member interface is removed while packets are 1816 * queued for it. 1817 */ 1818 if (sc == NULL) { 1819 m_freem(m); 1820 return; 1821 } 1822 1823 if (PFIL_HOOKED(&V_inet_pfil_hook) 1824 #ifdef INET6 1825 || PFIL_HOOKED(&V_inet6_pfil_hook) 1826 #endif 1827 ) { 1828 if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0) 1829 return; 1830 if (m == NULL) 1831 return; 1832 } 1833 1834 bridge_enqueue(sc, ifp, m); 1835 } 1836 1837 /* 1838 * bridge_output: 1839 * 1840 * Send output from a bridge member interface. This 1841 * performs the bridging function for locally originated 1842 * packets. 1843 * 1844 * The mbuf has the Ethernet header already attached. We must 1845 * enqueue or free the mbuf before returning. 1846 */ 1847 static int 1848 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, 1849 struct rtentry *rt) 1850 { 1851 struct ether_header *eh; 1852 struct ifnet *dst_if; 1853 struct bridge_softc *sc; 1854 uint16_t vlan; 1855 1856 if (m->m_len < ETHER_HDR_LEN) { 1857 m = m_pullup(m, ETHER_HDR_LEN); 1858 if (m == NULL) 1859 return (0); 1860 } 1861 1862 eh = mtod(m, struct ether_header *); 1863 sc = ifp->if_bridge; 1864 vlan = VLANTAGOF(m); 1865 1866 BRIDGE_LOCK(sc); 1867 1868 /* 1869 * If bridge is down, but the original output interface is up, 1870 * go ahead and send out that interface. Otherwise, the packet 1871 * is dropped below. 1872 */ 1873 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1874 dst_if = ifp; 1875 goto sendunicast; 1876 } 1877 1878 /* 1879 * If the packet is a multicast, or we don't know a better way to 1880 * get there, send to all interfaces. 1881 */ 1882 if (ETHER_IS_MULTICAST(eh->ether_dhost)) 1883 dst_if = NULL; 1884 else 1885 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan); 1886 if (dst_if == NULL) { 1887 struct bridge_iflist *bif; 1888 struct mbuf *mc; 1889 int error = 0, used = 0; 1890 1891 bridge_span(sc, m); 1892 1893 BRIDGE_LOCK2REF(sc, error); 1894 if (error) { 1895 m_freem(m); 1896 return (0); 1897 } 1898 1899 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1900 dst_if = bif->bif_ifp; 1901 1902 if (dst_if->if_type == IFT_GIF) 1903 continue; 1904 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 1905 continue; 1906 1907 /* 1908 * If this is not the original output interface, 1909 * and the interface is participating in spanning 1910 * tree, make sure the port is in a state that 1911 * allows forwarding. 1912 */ 1913 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) && 1914 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 1915 continue; 1916 1917 if (LIST_NEXT(bif, bif_next) == NULL) { 1918 used = 1; 1919 mc = m; 1920 } else { 1921 mc = m_copypacket(m, M_DONTWAIT); 1922 if (mc == NULL) { 1923 sc->sc_ifp->if_oerrors++; 1924 continue; 1925 } 1926 } 1927 1928 bridge_enqueue(sc, dst_if, mc); 1929 } 1930 if (used == 0) 1931 m_freem(m); 1932 BRIDGE_UNREF(sc); 1933 return (0); 1934 } 1935 1936 sendunicast: 1937 /* 1938 * XXX Spanning tree consideration here? 1939 */ 1940 1941 bridge_span(sc, m); 1942 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1943 m_freem(m); 1944 BRIDGE_UNLOCK(sc); 1945 return (0); 1946 } 1947 1948 BRIDGE_UNLOCK(sc); 1949 bridge_enqueue(sc, dst_if, m); 1950 return (0); 1951 } 1952 1953 /* 1954 * bridge_start: 1955 * 1956 * Start output on a bridge. 1957 * 1958 */ 1959 static void 1960 bridge_start(struct ifnet *ifp) 1961 { 1962 struct bridge_softc *sc; 1963 struct mbuf *m; 1964 struct ether_header *eh; 1965 struct ifnet *dst_if; 1966 1967 sc = ifp->if_softc; 1968 1969 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1970 for (;;) { 1971 IFQ_DEQUEUE(&ifp->if_snd, m); 1972 if (m == 0) 1973 break; 1974 ETHER_BPF_MTAP(ifp, m); 1975 1976 eh = mtod(m, struct ether_header *); 1977 dst_if = NULL; 1978 1979 BRIDGE_LOCK(sc); 1980 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 1981 dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1); 1982 } 1983 1984 if (dst_if == NULL) 1985 bridge_broadcast(sc, ifp, m, 0); 1986 else { 1987 BRIDGE_UNLOCK(sc); 1988 bridge_enqueue(sc, dst_if, m); 1989 } 1990 } 1991 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1992 } 1993 1994 /* 1995 * bridge_forward: 1996 * 1997 * The forwarding function of the bridge. 1998 * 1999 * NOTE: Releases the lock on return. 2000 */ 2001 static void 2002 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, 2003 struct mbuf *m) 2004 { 2005 struct bridge_iflist *dbif; 2006 struct ifnet *src_if, *dst_if, *ifp; 2007 struct ether_header *eh; 2008 uint16_t vlan; 2009 uint8_t *dst; 2010 int error; 2011 2012 src_if = m->m_pkthdr.rcvif; 2013 ifp = sc->sc_ifp; 2014 2015 ifp->if_ipackets++; 2016 ifp->if_ibytes += m->m_pkthdr.len; 2017 vlan = VLANTAGOF(m); 2018 2019 if ((sbif->bif_flags & IFBIF_STP) && 2020 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2021 goto drop; 2022 2023 eh = mtod(m, struct ether_header *); 2024 dst = eh->ether_dhost; 2025 2026 /* If the interface is learning, record the address. */ 2027 if (sbif->bif_flags & IFBIF_LEARNING) { 2028 error = bridge_rtupdate(sc, eh->ether_shost, vlan, 2029 sbif, 0, IFBAF_DYNAMIC); 2030 /* 2031 * If the interface has addresses limits then deny any source 2032 * that is not in the cache. 2033 */ 2034 if (error && sbif->bif_addrmax) 2035 goto drop; 2036 } 2037 2038 if ((sbif->bif_flags & IFBIF_STP) != 0 && 2039 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) 2040 goto drop; 2041 2042 /* 2043 * At this point, the port either doesn't participate 2044 * in spanning tree or it is in the forwarding state. 2045 */ 2046 2047 /* 2048 * If the packet is unicast, destined for someone on 2049 * "this" side of the bridge, drop it. 2050 */ 2051 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 2052 dst_if = bridge_rtlookup(sc, dst, vlan); 2053 if (src_if == dst_if) 2054 goto drop; 2055 } else { 2056 /* 2057 * Check if its a reserved multicast address, any address 2058 * listed in 802.1D section 7.12.6 may not be forwarded by the 2059 * bridge. 2060 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F 2061 */ 2062 if (dst[0] == 0x01 && dst[1] == 0x80 && 2063 dst[2] == 0xc2 && dst[3] == 0x00 && 2064 dst[4] == 0x00 && dst[5] <= 0x0f) 2065 goto drop; 2066 2067 /* ...forward it to all interfaces. */ 2068 ifp->if_imcasts++; 2069 dst_if = NULL; 2070 } 2071 2072 /* 2073 * If we have a destination interface which is a member of our bridge, 2074 * OR this is a unicast packet, push it through the bpf(4) machinery. 2075 * For broadcast or multicast packets, don't bother because it will 2076 * be reinjected into ether_input. We do this before we pass the packets 2077 * through the pfil(9) framework, as it is possible that pfil(9) will 2078 * drop the packet, or possibly modify it, making it difficult to debug 2079 * firewall issues on the bridge. 2080 */ 2081 if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) 2082 ETHER_BPF_MTAP(ifp, m); 2083 2084 /* run the packet filter */ 2085 if (PFIL_HOOKED(&V_inet_pfil_hook) 2086 #ifdef INET6 2087 || PFIL_HOOKED(&V_inet6_pfil_hook) 2088 #endif 2089 ) { 2090 BRIDGE_UNLOCK(sc); 2091 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0) 2092 return; 2093 if (m == NULL) 2094 return; 2095 BRIDGE_LOCK(sc); 2096 } 2097 2098 if (dst_if == NULL) { 2099 bridge_broadcast(sc, src_if, m, 1); 2100 return; 2101 } 2102 2103 /* 2104 * At this point, we're dealing with a unicast frame 2105 * going to a different interface. 2106 */ 2107 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2108 goto drop; 2109 2110 dbif = bridge_lookup_member_if(sc, dst_if); 2111 if (dbif == NULL) 2112 /* Not a member of the bridge (anymore?) */ 2113 goto drop; 2114 2115 /* Private segments can not talk to each other */ 2116 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE) 2117 goto drop; 2118 2119 if ((dbif->bif_flags & IFBIF_STP) && 2120 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2121 goto drop; 2122 2123 BRIDGE_UNLOCK(sc); 2124 2125 if (PFIL_HOOKED(&V_inet_pfil_hook) 2126 #ifdef INET6 2127 || PFIL_HOOKED(&V_inet6_pfil_hook) 2128 #endif 2129 ) { 2130 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0) 2131 return; 2132 if (m == NULL) 2133 return; 2134 } 2135 2136 bridge_enqueue(sc, dst_if, m); 2137 return; 2138 2139 drop: 2140 BRIDGE_UNLOCK(sc); 2141 m_freem(m); 2142 } 2143 2144 /* 2145 * bridge_input: 2146 * 2147 * Receive input from a member interface. Queue the packet for 2148 * bridging if it is not for us. 2149 */ 2150 static struct mbuf * 2151 bridge_input(struct ifnet *ifp, struct mbuf *m) 2152 { 2153 struct bridge_softc *sc = ifp->if_bridge; 2154 struct bridge_iflist *bif, *bif2; 2155 struct ifnet *bifp; 2156 struct ether_header *eh; 2157 struct mbuf *mc, *mc2; 2158 uint16_t vlan; 2159 int error; 2160 2161 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2162 return (m); 2163 2164 bifp = sc->sc_ifp; 2165 vlan = VLANTAGOF(m); 2166 2167 /* 2168 * Implement support for bridge monitoring. If this flag has been 2169 * set on this interface, discard the packet once we push it through 2170 * the bpf(4) machinery, but before we do, increment the byte and 2171 * packet counters associated with this interface. 2172 */ 2173 if ((bifp->if_flags & IFF_MONITOR) != 0) { 2174 m->m_pkthdr.rcvif = bifp; 2175 ETHER_BPF_MTAP(bifp, m); 2176 bifp->if_ipackets++; 2177 bifp->if_ibytes += m->m_pkthdr.len; 2178 m_freem(m); 2179 return (NULL); 2180 } 2181 BRIDGE_LOCK(sc); 2182 bif = bridge_lookup_member_if(sc, ifp); 2183 if (bif == NULL) { 2184 BRIDGE_UNLOCK(sc); 2185 return (m); 2186 } 2187 2188 eh = mtod(m, struct ether_header *); 2189 2190 bridge_span(sc, m); 2191 2192 if (m->m_flags & (M_BCAST|M_MCAST)) { 2193 /* Tap off 802.1D packets; they do not get forwarded. */ 2194 if (memcmp(eh->ether_dhost, bstp_etheraddr, 2195 ETHER_ADDR_LEN) == 0) { 2196 m = bstp_input(&bif->bif_stp, ifp, m); 2197 if (m == NULL) { 2198 BRIDGE_UNLOCK(sc); 2199 return (NULL); 2200 } 2201 } 2202 2203 if ((bif->bif_flags & IFBIF_STP) && 2204 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2205 BRIDGE_UNLOCK(sc); 2206 return (m); 2207 } 2208 2209 /* 2210 * Make a deep copy of the packet and enqueue the copy 2211 * for bridge processing; return the original packet for 2212 * local processing. 2213 */ 2214 mc = m_dup(m, M_DONTWAIT); 2215 if (mc == NULL) { 2216 BRIDGE_UNLOCK(sc); 2217 return (m); 2218 } 2219 2220 /* Perform the bridge forwarding function with the copy. */ 2221 bridge_forward(sc, bif, mc); 2222 2223 /* 2224 * Reinject the mbuf as arriving on the bridge so we have a 2225 * chance at claiming multicast packets. We can not loop back 2226 * here from ether_input as a bridge is never a member of a 2227 * bridge. 2228 */ 2229 KASSERT(bifp->if_bridge == NULL, 2230 ("loop created in bridge_input")); 2231 mc2 = m_dup(m, M_DONTWAIT); 2232 if (mc2 != NULL) { 2233 /* Keep the layer3 header aligned */ 2234 int i = min(mc2->m_pkthdr.len, max_protohdr); 2235 mc2 = m_copyup(mc2, i, ETHER_ALIGN); 2236 } 2237 if (mc2 != NULL) { 2238 mc2->m_pkthdr.rcvif = bifp; 2239 (*bifp->if_input)(bifp, mc2); 2240 } 2241 2242 /* Return the original packet for local processing. */ 2243 return (m); 2244 } 2245 2246 if ((bif->bif_flags & IFBIF_STP) && 2247 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2248 BRIDGE_UNLOCK(sc); 2249 return (m); 2250 } 2251 2252 #if (defined(INET) || defined(INET6)) 2253 # define OR_CARP_CHECK_WE_ARE_DST(iface) \ 2254 || ((iface)->if_carp \ 2255 && (*carp_forus_p)((iface), eh->ether_dhost)) 2256 # define OR_CARP_CHECK_WE_ARE_SRC(iface) \ 2257 || ((iface)->if_carp \ 2258 && (*carp_forus_p)((iface), eh->ether_shost)) 2259 #else 2260 # define OR_CARP_CHECK_WE_ARE_DST(iface) 2261 # define OR_CARP_CHECK_WE_ARE_SRC(iface) 2262 #endif 2263 2264 #ifdef INET6 2265 # define OR_PFIL_HOOKED_INET6 \ 2266 || PFIL_HOOKED(&V_inet6_pfil_hook) 2267 #else 2268 # define OR_PFIL_HOOKED_INET6 2269 #endif 2270 2271 #define GRAB_OUR_PACKETS(iface) \ 2272 if ((iface)->if_type == IFT_GIF) \ 2273 continue; \ 2274 /* It is destined for us. */ \ 2275 if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \ 2276 OR_CARP_CHECK_WE_ARE_DST((iface)) \ 2277 ) { \ 2278 if ((iface)->if_type == IFT_BRIDGE) { \ 2279 ETHER_BPF_MTAP(iface, m); \ 2280 iface->if_ipackets++; \ 2281 /* Filter on the physical interface. */ \ 2282 if (pfil_local_phys && \ 2283 (PFIL_HOOKED(&V_inet_pfil_hook) \ 2284 OR_PFIL_HOOKED_INET6)) { \ 2285 if (bridge_pfil(&m, NULL, ifp, \ 2286 PFIL_IN) != 0 || m == NULL) { \ 2287 BRIDGE_UNLOCK(sc); \ 2288 return (NULL); \ 2289 } \ 2290 } \ 2291 } \ 2292 if (bif->bif_flags & IFBIF_LEARNING) { \ 2293 error = bridge_rtupdate(sc, eh->ether_shost, \ 2294 vlan, bif, 0, IFBAF_DYNAMIC); \ 2295 if (error && bif->bif_addrmax) { \ 2296 BRIDGE_UNLOCK(sc); \ 2297 m_freem(m); \ 2298 return (NULL); \ 2299 } \ 2300 } \ 2301 m->m_pkthdr.rcvif = iface; \ 2302 BRIDGE_UNLOCK(sc); \ 2303 return (m); \ 2304 } \ 2305 \ 2306 /* We just received a packet that we sent out. */ \ 2307 if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \ 2308 OR_CARP_CHECK_WE_ARE_SRC((iface)) \ 2309 ) { \ 2310 BRIDGE_UNLOCK(sc); \ 2311 m_freem(m); \ 2312 return (NULL); \ 2313 } 2314 2315 /* 2316 * Unicast. Make sure it's not for the bridge. 2317 */ 2318 do { GRAB_OUR_PACKETS(bifp) } while (0); 2319 2320 /* 2321 * Give a chance for ifp at first priority. This will help when the 2322 * packet comes through the interface like VLAN's with the same MACs 2323 * on several interfaces from the same bridge. This also will save 2324 * some CPU cycles in case the destination interface and the input 2325 * interface (eq ifp) are the same. 2326 */ 2327 do { GRAB_OUR_PACKETS(ifp) } while (0); 2328 2329 /* Now check the all bridge members. */ 2330 LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) { 2331 GRAB_OUR_PACKETS(bif2->bif_ifp) 2332 } 2333 2334 #undef OR_CARP_CHECK_WE_ARE_DST 2335 #undef OR_CARP_CHECK_WE_ARE_SRC 2336 #undef OR_PFIL_HOOKED_INET6 2337 #undef GRAB_OUR_PACKETS 2338 2339 /* Perform the bridge forwarding function. */ 2340 bridge_forward(sc, bif, m); 2341 2342 return (NULL); 2343 } 2344 2345 /* 2346 * bridge_broadcast: 2347 * 2348 * Send a frame to all interfaces that are members of 2349 * the bridge, except for the one on which the packet 2350 * arrived. 2351 * 2352 * NOTE: Releases the lock on return. 2353 */ 2354 static void 2355 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, 2356 struct mbuf *m, int runfilt) 2357 { 2358 struct bridge_iflist *dbif, *sbif; 2359 struct mbuf *mc; 2360 struct ifnet *dst_if; 2361 int error = 0, used = 0, i; 2362 2363 sbif = bridge_lookup_member_if(sc, src_if); 2364 2365 BRIDGE_LOCK2REF(sc, error); 2366 if (error) { 2367 m_freem(m); 2368 return; 2369 } 2370 2371 /* Filter on the bridge interface before broadcasting */ 2372 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2373 #ifdef INET6 2374 || PFIL_HOOKED(&V_inet6_pfil_hook) 2375 #endif 2376 )) { 2377 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) 2378 goto out; 2379 if (m == NULL) 2380 goto out; 2381 } 2382 2383 LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) { 2384 dst_if = dbif->bif_ifp; 2385 if (dst_if == src_if) 2386 continue; 2387 2388 /* Private segments can not talk to each other */ 2389 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)) 2390 continue; 2391 2392 if ((dbif->bif_flags & IFBIF_STP) && 2393 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2394 continue; 2395 2396 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 && 2397 (m->m_flags & (M_BCAST|M_MCAST)) == 0) 2398 continue; 2399 2400 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2401 continue; 2402 2403 if (LIST_NEXT(dbif, bif_next) == NULL) { 2404 mc = m; 2405 used = 1; 2406 } else { 2407 mc = m_dup(m, M_DONTWAIT); 2408 if (mc == NULL) { 2409 sc->sc_ifp->if_oerrors++; 2410 continue; 2411 } 2412 } 2413 2414 /* 2415 * Filter on the output interface. Pass a NULL bridge interface 2416 * pointer so we do not redundantly filter on the bridge for 2417 * each interface we broadcast on. 2418 */ 2419 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2420 #ifdef INET6 2421 || PFIL_HOOKED(&V_inet6_pfil_hook) 2422 #endif 2423 )) { 2424 if (used == 0) { 2425 /* Keep the layer3 header aligned */ 2426 i = min(mc->m_pkthdr.len, max_protohdr); 2427 mc = m_copyup(mc, i, ETHER_ALIGN); 2428 if (mc == NULL) { 2429 sc->sc_ifp->if_oerrors++; 2430 continue; 2431 } 2432 } 2433 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) 2434 continue; 2435 if (mc == NULL) 2436 continue; 2437 } 2438 2439 bridge_enqueue(sc, dst_if, mc); 2440 } 2441 if (used == 0) 2442 m_freem(m); 2443 2444 out: 2445 BRIDGE_UNREF(sc); 2446 } 2447 2448 /* 2449 * bridge_span: 2450 * 2451 * Duplicate a packet out one or more interfaces that are in span mode, 2452 * the original mbuf is unmodified. 2453 */ 2454 static void 2455 bridge_span(struct bridge_softc *sc, struct mbuf *m) 2456 { 2457 struct bridge_iflist *bif; 2458 struct ifnet *dst_if; 2459 struct mbuf *mc; 2460 2461 if (LIST_EMPTY(&sc->sc_spanlist)) 2462 return; 2463 2464 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 2465 dst_if = bif->bif_ifp; 2466 2467 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2468 continue; 2469 2470 mc = m_copypacket(m, M_DONTWAIT); 2471 if (mc == NULL) { 2472 sc->sc_ifp->if_oerrors++; 2473 continue; 2474 } 2475 2476 bridge_enqueue(sc, dst_if, mc); 2477 } 2478 } 2479 2480 /* 2481 * bridge_rtupdate: 2482 * 2483 * Add a bridge routing entry. 2484 */ 2485 static int 2486 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, 2487 struct bridge_iflist *bif, int setflags, uint8_t flags) 2488 { 2489 struct bridge_rtnode *brt; 2490 int error; 2491 2492 BRIDGE_LOCK_ASSERT(sc); 2493 2494 /* Check the source address is valid and not multicast. */ 2495 if (ETHER_IS_MULTICAST(dst) || 2496 (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 && 2497 dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) 2498 return (EINVAL); 2499 2500 /* 802.1p frames map to vlan 1 */ 2501 if (vlan == 0) 2502 vlan = 1; 2503 2504 /* 2505 * A route for this destination might already exist. If so, 2506 * update it, otherwise create a new one. 2507 */ 2508 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) { 2509 if (sc->sc_brtcnt >= sc->sc_brtmax) { 2510 sc->sc_brtexceeded++; 2511 return (ENOSPC); 2512 } 2513 /* Check per interface address limits (if enabled) */ 2514 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) { 2515 bif->bif_addrexceeded++; 2516 return (ENOSPC); 2517 } 2518 2519 /* 2520 * Allocate a new bridge forwarding node, and 2521 * initialize the expiration time and Ethernet 2522 * address. 2523 */ 2524 brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO); 2525 if (brt == NULL) 2526 return (ENOMEM); 2527 2528 if (bif->bif_flags & IFBIF_STICKY) 2529 brt->brt_flags = IFBAF_STICKY; 2530 else 2531 brt->brt_flags = IFBAF_DYNAMIC; 2532 2533 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN); 2534 brt->brt_vlan = vlan; 2535 2536 if ((error = bridge_rtnode_insert(sc, brt)) != 0) { 2537 uma_zfree(bridge_rtnode_zone, brt); 2538 return (error); 2539 } 2540 brt->brt_dst = bif; 2541 bif->bif_addrcnt++; 2542 } 2543 2544 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 2545 brt->brt_dst != bif) { 2546 brt->brt_dst->bif_addrcnt--; 2547 brt->brt_dst = bif; 2548 brt->brt_dst->bif_addrcnt++; 2549 } 2550 2551 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2552 brt->brt_expire = time_uptime + sc->sc_brttimeout; 2553 if (setflags) 2554 brt->brt_flags = flags; 2555 2556 return (0); 2557 } 2558 2559 /* 2560 * bridge_rtlookup: 2561 * 2562 * Lookup the destination interface for an address. 2563 */ 2564 static struct ifnet * 2565 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2566 { 2567 struct bridge_rtnode *brt; 2568 2569 BRIDGE_LOCK_ASSERT(sc); 2570 2571 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) 2572 return (NULL); 2573 2574 return (brt->brt_ifp); 2575 } 2576 2577 /* 2578 * bridge_rttrim: 2579 * 2580 * Trim the routine table so that we have a number 2581 * of routing entries less than or equal to the 2582 * maximum number. 2583 */ 2584 static void 2585 bridge_rttrim(struct bridge_softc *sc) 2586 { 2587 struct bridge_rtnode *brt, *nbrt; 2588 2589 BRIDGE_LOCK_ASSERT(sc); 2590 2591 /* Make sure we actually need to do this. */ 2592 if (sc->sc_brtcnt <= sc->sc_brtmax) 2593 return; 2594 2595 /* Force an aging cycle; this might trim enough addresses. */ 2596 bridge_rtage(sc); 2597 if (sc->sc_brtcnt <= sc->sc_brtmax) 2598 return; 2599 2600 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2601 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { 2602 bridge_rtnode_destroy(sc, brt); 2603 if (sc->sc_brtcnt <= sc->sc_brtmax) 2604 return; 2605 } 2606 } 2607 } 2608 2609 /* 2610 * bridge_timer: 2611 * 2612 * Aging timer for the bridge. 2613 */ 2614 static void 2615 bridge_timer(void *arg) 2616 { 2617 struct bridge_softc *sc = arg; 2618 2619 BRIDGE_LOCK_ASSERT(sc); 2620 2621 bridge_rtage(sc); 2622 2623 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) 2624 callout_reset(&sc->sc_brcallout, 2625 bridge_rtable_prune_period * hz, bridge_timer, sc); 2626 } 2627 2628 /* 2629 * bridge_rtage: 2630 * 2631 * Perform an aging cycle. 2632 */ 2633 static void 2634 bridge_rtage(struct bridge_softc *sc) 2635 { 2636 struct bridge_rtnode *brt, *nbrt; 2637 2638 BRIDGE_LOCK_ASSERT(sc); 2639 2640 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2641 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { 2642 if (time_uptime >= brt->brt_expire) 2643 bridge_rtnode_destroy(sc, brt); 2644 } 2645 } 2646 } 2647 2648 /* 2649 * bridge_rtflush: 2650 * 2651 * Remove all dynamic addresses from the bridge. 2652 */ 2653 static void 2654 bridge_rtflush(struct bridge_softc *sc, int full) 2655 { 2656 struct bridge_rtnode *brt, *nbrt; 2657 2658 BRIDGE_LOCK_ASSERT(sc); 2659 2660 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2661 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2662 bridge_rtnode_destroy(sc, brt); 2663 } 2664 } 2665 2666 /* 2667 * bridge_rtdaddr: 2668 * 2669 * Remove an address from the table. 2670 */ 2671 static int 2672 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2673 { 2674 struct bridge_rtnode *brt; 2675 int found = 0; 2676 2677 BRIDGE_LOCK_ASSERT(sc); 2678 2679 /* 2680 * If vlan is zero then we want to delete for all vlans so the lookup 2681 * may return more than one. 2682 */ 2683 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) { 2684 bridge_rtnode_destroy(sc, brt); 2685 found = 1; 2686 } 2687 2688 return (found ? 0 : ENOENT); 2689 } 2690 2691 /* 2692 * bridge_rtdelete: 2693 * 2694 * Delete routes to a speicifc member interface. 2695 */ 2696 static void 2697 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full) 2698 { 2699 struct bridge_rtnode *brt, *nbrt; 2700 2701 BRIDGE_LOCK_ASSERT(sc); 2702 2703 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2704 if (brt->brt_ifp == ifp && (full || 2705 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) 2706 bridge_rtnode_destroy(sc, brt); 2707 } 2708 } 2709 2710 /* 2711 * bridge_rtable_init: 2712 * 2713 * Initialize the route table for this bridge. 2714 */ 2715 static int 2716 bridge_rtable_init(struct bridge_softc *sc) 2717 { 2718 int i; 2719 2720 sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE, 2721 M_DEVBUF, M_NOWAIT); 2722 if (sc->sc_rthash == NULL) 2723 return (ENOMEM); 2724 2725 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++) 2726 LIST_INIT(&sc->sc_rthash[i]); 2727 2728 sc->sc_rthash_key = arc4random(); 2729 2730 LIST_INIT(&sc->sc_rtlist); 2731 2732 return (0); 2733 } 2734 2735 /* 2736 * bridge_rtable_fini: 2737 * 2738 * Deconstruct the route table for this bridge. 2739 */ 2740 static void 2741 bridge_rtable_fini(struct bridge_softc *sc) 2742 { 2743 2744 KASSERT(sc->sc_brtcnt == 0, 2745 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt)); 2746 free(sc->sc_rthash, M_DEVBUF); 2747 } 2748 2749 /* 2750 * The following hash function is adapted from "Hash Functions" by Bob Jenkins 2751 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). 2752 */ 2753 #define mix(a, b, c) \ 2754 do { \ 2755 a -= b; a -= c; a ^= (c >> 13); \ 2756 b -= c; b -= a; b ^= (a << 8); \ 2757 c -= a; c -= b; c ^= (b >> 13); \ 2758 a -= b; a -= c; a ^= (c >> 12); \ 2759 b -= c; b -= a; b ^= (a << 16); \ 2760 c -= a; c -= b; c ^= (b >> 5); \ 2761 a -= b; a -= c; a ^= (c >> 3); \ 2762 b -= c; b -= a; b ^= (a << 10); \ 2763 c -= a; c -= b; c ^= (b >> 15); \ 2764 } while (/*CONSTCOND*/0) 2765 2766 static __inline uint32_t 2767 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr) 2768 { 2769 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key; 2770 2771 b += addr[5] << 8; 2772 b += addr[4]; 2773 a += addr[3] << 24; 2774 a += addr[2] << 16; 2775 a += addr[1] << 8; 2776 a += addr[0]; 2777 2778 mix(a, b, c); 2779 2780 return (c & BRIDGE_RTHASH_MASK); 2781 } 2782 2783 #undef mix 2784 2785 static int 2786 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b) 2787 { 2788 int i, d; 2789 2790 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) { 2791 d = ((int)a[i]) - ((int)b[i]); 2792 } 2793 2794 return (d); 2795 } 2796 2797 /* 2798 * bridge_rtnode_lookup: 2799 * 2800 * Look up a bridge route node for the specified destination. Compare the 2801 * vlan id or if zero then just return the first match. 2802 */ 2803 static struct bridge_rtnode * 2804 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2805 { 2806 struct bridge_rtnode *brt; 2807 uint32_t hash; 2808 int dir; 2809 2810 BRIDGE_LOCK_ASSERT(sc); 2811 2812 hash = bridge_rthash(sc, addr); 2813 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) { 2814 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr); 2815 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) 2816 return (brt); 2817 if (dir > 0) 2818 return (NULL); 2819 } 2820 2821 return (NULL); 2822 } 2823 2824 /* 2825 * bridge_rtnode_insert: 2826 * 2827 * Insert the specified bridge node into the route table. We 2828 * assume the entry is not already in the table. 2829 */ 2830 static int 2831 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt) 2832 { 2833 struct bridge_rtnode *lbrt; 2834 uint32_t hash; 2835 int dir; 2836 2837 BRIDGE_LOCK_ASSERT(sc); 2838 2839 hash = bridge_rthash(sc, brt->brt_addr); 2840 2841 lbrt = LIST_FIRST(&sc->sc_rthash[hash]); 2842 if (lbrt == NULL) { 2843 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash); 2844 goto out; 2845 } 2846 2847 do { 2848 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr); 2849 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) 2850 return (EEXIST); 2851 if (dir > 0) { 2852 LIST_INSERT_BEFORE(lbrt, brt, brt_hash); 2853 goto out; 2854 } 2855 if (LIST_NEXT(lbrt, brt_hash) == NULL) { 2856 LIST_INSERT_AFTER(lbrt, brt, brt_hash); 2857 goto out; 2858 } 2859 lbrt = LIST_NEXT(lbrt, brt_hash); 2860 } while (lbrt != NULL); 2861 2862 #ifdef DIAGNOSTIC 2863 panic("bridge_rtnode_insert: impossible"); 2864 #endif 2865 2866 out: 2867 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list); 2868 sc->sc_brtcnt++; 2869 2870 return (0); 2871 } 2872 2873 /* 2874 * bridge_rtnode_destroy: 2875 * 2876 * Destroy a bridge rtnode. 2877 */ 2878 static void 2879 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt) 2880 { 2881 BRIDGE_LOCK_ASSERT(sc); 2882 2883 LIST_REMOVE(brt, brt_hash); 2884 2885 LIST_REMOVE(brt, brt_list); 2886 sc->sc_brtcnt--; 2887 brt->brt_dst->bif_addrcnt--; 2888 uma_zfree(bridge_rtnode_zone, brt); 2889 } 2890 2891 /* 2892 * bridge_rtable_expire: 2893 * 2894 * Set the expiry time for all routes on an interface. 2895 */ 2896 static void 2897 bridge_rtable_expire(struct ifnet *ifp, int age) 2898 { 2899 struct bridge_softc *sc = ifp->if_bridge; 2900 struct bridge_rtnode *brt; 2901 2902 BRIDGE_LOCK(sc); 2903 2904 /* 2905 * If the age is zero then flush, otherwise set all the expiry times to 2906 * age for the interface 2907 */ 2908 if (age == 0) 2909 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN); 2910 else { 2911 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 2912 /* Cap the expiry time to 'age' */ 2913 if (brt->brt_ifp == ifp && 2914 brt->brt_expire > time_uptime + age && 2915 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2916 brt->brt_expire = time_uptime + age; 2917 } 2918 } 2919 BRIDGE_UNLOCK(sc); 2920 } 2921 2922 /* 2923 * bridge_state_change: 2924 * 2925 * Callback from the bridgestp code when a port changes states. 2926 */ 2927 static void 2928 bridge_state_change(struct ifnet *ifp, int state) 2929 { 2930 struct bridge_softc *sc = ifp->if_bridge; 2931 static const char *stpstates[] = { 2932 "disabled", 2933 "listening", 2934 "learning", 2935 "forwarding", 2936 "blocking", 2937 "discarding" 2938 }; 2939 2940 if (log_stp) 2941 log(LOG_NOTICE, "%s: state changed to %s on %s\n", 2942 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname); 2943 } 2944 2945 /* 2946 * Send bridge packets through pfil if they are one of the types pfil can deal 2947 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without 2948 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for 2949 * that interface. 2950 */ 2951 static int 2952 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) 2953 { 2954 int snap, error, i, hlen; 2955 struct ether_header *eh1, eh2; 2956 struct ip_fw_args args; 2957 struct ip *ip; 2958 struct llc llc1; 2959 u_int16_t ether_type; 2960 2961 snap = 0; 2962 error = -1; /* Default error if not error == 0 */ 2963 2964 #if 0 2965 /* we may return with the IP fields swapped, ensure its not shared */ 2966 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__)); 2967 #endif 2968 2969 if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) 2970 return (0); /* filtering is disabled */ 2971 2972 i = min((*mp)->m_pkthdr.len, max_protohdr); 2973 if ((*mp)->m_len < i) { 2974 *mp = m_pullup(*mp, i); 2975 if (*mp == NULL) { 2976 printf("%s: m_pullup failed\n", __func__); 2977 return (-1); 2978 } 2979 } 2980 2981 eh1 = mtod(*mp, struct ether_header *); 2982 ether_type = ntohs(eh1->ether_type); 2983 2984 /* 2985 * Check for SNAP/LLC. 2986 */ 2987 if (ether_type < ETHERMTU) { 2988 struct llc *llc2 = (struct llc *)(eh1 + 1); 2989 2990 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 && 2991 llc2->llc_dsap == LLC_SNAP_LSAP && 2992 llc2->llc_ssap == LLC_SNAP_LSAP && 2993 llc2->llc_control == LLC_UI) { 2994 ether_type = htons(llc2->llc_un.type_snap.ether_type); 2995 snap = 1; 2996 } 2997 } 2998 2999 /* 3000 * If we're trying to filter bridge traffic, don't look at anything 3001 * other than IP and ARP traffic. If the filter doesn't understand 3002 * IPv6, don't allow IPv6 through the bridge either. This is lame 3003 * since if we really wanted, say, an AppleTalk filter, we are hosed, 3004 * but of course we don't have an AppleTalk filter to begin with. 3005 * (Note that since pfil doesn't understand ARP it will pass *ALL* 3006 * ARP traffic.) 3007 */ 3008 switch (ether_type) { 3009 case ETHERTYPE_ARP: 3010 case ETHERTYPE_REVARP: 3011 if (pfil_ipfw_arp == 0) 3012 return (0); /* Automatically pass */ 3013 break; 3014 3015 case ETHERTYPE_IP: 3016 #ifdef INET6 3017 case ETHERTYPE_IPV6: 3018 #endif /* INET6 */ 3019 break; 3020 default: 3021 /* 3022 * Check to see if the user wants to pass non-ip 3023 * packets, these will not be checked by pfil(9) and 3024 * passed unconditionally so the default is to drop. 3025 */ 3026 if (pfil_onlyip) 3027 goto bad; 3028 } 3029 3030 /* Strip off the Ethernet header and keep a copy. */ 3031 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2); 3032 m_adj(*mp, ETHER_HDR_LEN); 3033 3034 /* Strip off snap header, if present */ 3035 if (snap) { 3036 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1); 3037 m_adj(*mp, sizeof(struct llc)); 3038 } 3039 3040 /* 3041 * Check the IP header for alignment and errors 3042 */ 3043 if (dir == PFIL_IN) { 3044 switch (ether_type) { 3045 case ETHERTYPE_IP: 3046 error = bridge_ip_checkbasic(mp); 3047 break; 3048 #ifdef INET6 3049 case ETHERTYPE_IPV6: 3050 error = bridge_ip6_checkbasic(mp); 3051 break; 3052 #endif /* INET6 */ 3053 default: 3054 error = 0; 3055 } 3056 if (error) 3057 goto bad; 3058 } 3059 3060 /* XXX this section is also in if_ethersubr.c */ 3061 // XXX PFIL_OUT or DIR_OUT ? 3062 if (V_ip_fw_chk_ptr && pfil_ipfw != 0 && 3063 dir == PFIL_OUT && ifp != NULL) { 3064 struct m_tag *mtag; 3065 3066 error = -1; 3067 /* fetch the start point from existing tags, if any */ 3068 mtag = m_tag_locate(*mp, MTAG_IPFW_RULE, 0, NULL); 3069 if (mtag == NULL) { 3070 args.rule.slot = 0; 3071 } else { 3072 struct ipfw_rule_ref *r; 3073 3074 /* XXX can we free the tag after use ? */ 3075 mtag->m_tag_id = PACKET_TAG_NONE; 3076 r = (struct ipfw_rule_ref *)(mtag + 1); 3077 /* packet already partially processed ? */ 3078 if (r->info & IPFW_ONEPASS) 3079 goto ipfwpass; 3080 args.rule = *r; 3081 } 3082 3083 args.m = *mp; 3084 args.oif = ifp; 3085 args.next_hop = NULL; 3086 args.eh = &eh2; 3087 args.inp = NULL; /* used by ipfw uid/gid/jail rules */ 3088 i = V_ip_fw_chk_ptr(&args); 3089 *mp = args.m; 3090 3091 if (*mp == NULL) 3092 return (error); 3093 3094 if (ip_dn_io_ptr && (i == IP_FW_DUMMYNET)) { 3095 3096 /* put the Ethernet header back on */ 3097 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3098 if (*mp == NULL) 3099 return (error); 3100 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3101 3102 /* 3103 * Pass the pkt to dummynet, which consumes it. The 3104 * packet will return to us via bridge_dummynet(). 3105 */ 3106 args.oif = ifp; 3107 ip_dn_io_ptr(mp, DIR_FWD | PROTO_IFB, &args); 3108 return (error); 3109 } 3110 3111 if (i != IP_FW_PASS) /* drop */ 3112 goto bad; 3113 } 3114 3115 ipfwpass: 3116 error = 0; 3117 3118 /* 3119 * Run the packet through pfil 3120 */ 3121 switch (ether_type) { 3122 case ETHERTYPE_IP: 3123 /* 3124 * before calling the firewall, swap fields the same as 3125 * IP does. here we assume the header is contiguous 3126 */ 3127 ip = mtod(*mp, struct ip *); 3128 3129 ip->ip_len = ntohs(ip->ip_len); 3130 ip->ip_off = ntohs(ip->ip_off); 3131 3132 /* 3133 * Run pfil on the member interface and the bridge, both can 3134 * be skipped by clearing pfil_member or pfil_bridge. 3135 * 3136 * Keep the order: 3137 * in_if -> bridge_if -> out_if 3138 */ 3139 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3140 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3141 dir, NULL); 3142 3143 if (*mp == NULL || error != 0) /* filter may consume */ 3144 break; 3145 3146 if (pfil_member && ifp != NULL) 3147 error = pfil_run_hooks(&V_inet_pfil_hook, mp, ifp, 3148 dir, NULL); 3149 3150 if (*mp == NULL || error != 0) /* filter may consume */ 3151 break; 3152 3153 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3154 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3155 dir, NULL); 3156 3157 if (*mp == NULL || error != 0) /* filter may consume */ 3158 break; 3159 3160 /* check if we need to fragment the packet */ 3161 if (pfil_member && ifp != NULL && dir == PFIL_OUT) { 3162 i = (*mp)->m_pkthdr.len; 3163 if (i > ifp->if_mtu) { 3164 error = bridge_fragment(ifp, *mp, &eh2, snap, 3165 &llc1); 3166 return (error); 3167 } 3168 } 3169 3170 /* Recalculate the ip checksum and restore byte ordering */ 3171 ip = mtod(*mp, struct ip *); 3172 hlen = ip->ip_hl << 2; 3173 if (hlen < sizeof(struct ip)) 3174 goto bad; 3175 if (hlen > (*mp)->m_len) { 3176 if ((*mp = m_pullup(*mp, hlen)) == 0) 3177 goto bad; 3178 ip = mtod(*mp, struct ip *); 3179 if (ip == NULL) 3180 goto bad; 3181 } 3182 ip->ip_len = htons(ip->ip_len); 3183 ip->ip_off = htons(ip->ip_off); 3184 ip->ip_sum = 0; 3185 if (hlen == sizeof(struct ip)) 3186 ip->ip_sum = in_cksum_hdr(ip); 3187 else 3188 ip->ip_sum = in_cksum(*mp, hlen); 3189 3190 break; 3191 #ifdef INET6 3192 case ETHERTYPE_IPV6: 3193 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3194 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3195 dir, NULL); 3196 3197 if (*mp == NULL || error != 0) /* filter may consume */ 3198 break; 3199 3200 if (pfil_member && ifp != NULL) 3201 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, ifp, 3202 dir, NULL); 3203 3204 if (*mp == NULL || error != 0) /* filter may consume */ 3205 break; 3206 3207 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3208 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3209 dir, NULL); 3210 break; 3211 #endif 3212 default: 3213 error = 0; 3214 break; 3215 } 3216 3217 if (*mp == NULL) 3218 return (error); 3219 if (error != 0) 3220 goto bad; 3221 3222 error = -1; 3223 3224 /* 3225 * Finally, put everything back the way it was and return 3226 */ 3227 if (snap) { 3228 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT); 3229 if (*mp == NULL) 3230 return (error); 3231 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc)); 3232 } 3233 3234 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3235 if (*mp == NULL) 3236 return (error); 3237 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3238 3239 return (0); 3240 3241 bad: 3242 m_freem(*mp); 3243 *mp = NULL; 3244 return (error); 3245 } 3246 3247 /* 3248 * Perform basic checks on header size since 3249 * pfil assumes ip_input has already processed 3250 * it for it. Cut-and-pasted from ip_input.c. 3251 * Given how simple the IPv6 version is, 3252 * does the IPv4 version really need to be 3253 * this complicated? 3254 * 3255 * XXX Should we update ipstat here, or not? 3256 * XXX Right now we update ipstat but not 3257 * XXX csum_counter. 3258 */ 3259 static int 3260 bridge_ip_checkbasic(struct mbuf **mp) 3261 { 3262 struct mbuf *m = *mp; 3263 struct ip *ip; 3264 int len, hlen; 3265 u_short sum; 3266 3267 if (*mp == NULL) 3268 return (-1); 3269 3270 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3271 if ((m = m_copyup(m, sizeof(struct ip), 3272 (max_linkhdr + 3) & ~3)) == NULL) { 3273 /* XXXJRT new stat, please */ 3274 KMOD_IPSTAT_INC(ips_toosmall); 3275 goto bad; 3276 } 3277 } else if (__predict_false(m->m_len < sizeof (struct ip))) { 3278 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) { 3279 KMOD_IPSTAT_INC(ips_toosmall); 3280 goto bad; 3281 } 3282 } 3283 ip = mtod(m, struct ip *); 3284 if (ip == NULL) goto bad; 3285 3286 if (ip->ip_v != IPVERSION) { 3287 KMOD_IPSTAT_INC(ips_badvers); 3288 goto bad; 3289 } 3290 hlen = ip->ip_hl << 2; 3291 if (hlen < sizeof(struct ip)) { /* minimum header length */ 3292 KMOD_IPSTAT_INC(ips_badhlen); 3293 goto bad; 3294 } 3295 if (hlen > m->m_len) { 3296 if ((m = m_pullup(m, hlen)) == 0) { 3297 KMOD_IPSTAT_INC(ips_badhlen); 3298 goto bad; 3299 } 3300 ip = mtod(m, struct ip *); 3301 if (ip == NULL) goto bad; 3302 } 3303 3304 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { 3305 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); 3306 } else { 3307 if (hlen == sizeof(struct ip)) { 3308 sum = in_cksum_hdr(ip); 3309 } else { 3310 sum = in_cksum(m, hlen); 3311 } 3312 } 3313 if (sum) { 3314 KMOD_IPSTAT_INC(ips_badsum); 3315 goto bad; 3316 } 3317 3318 /* Retrieve the packet length. */ 3319 len = ntohs(ip->ip_len); 3320 3321 /* 3322 * Check for additional length bogosity 3323 */ 3324 if (len < hlen) { 3325 KMOD_IPSTAT_INC(ips_badlen); 3326 goto bad; 3327 } 3328 3329 /* 3330 * Check that the amount of data in the buffers 3331 * is as at least much as the IP header would have us expect. 3332 * Drop packet if shorter than we expect. 3333 */ 3334 if (m->m_pkthdr.len < len) { 3335 KMOD_IPSTAT_INC(ips_tooshort); 3336 goto bad; 3337 } 3338 3339 /* Checks out, proceed */ 3340 *mp = m; 3341 return (0); 3342 3343 bad: 3344 *mp = m; 3345 return (-1); 3346 } 3347 3348 #ifdef INET6 3349 /* 3350 * Same as above, but for IPv6. 3351 * Cut-and-pasted from ip6_input.c. 3352 * XXX Should we update ip6stat, or not? 3353 */ 3354 static int 3355 bridge_ip6_checkbasic(struct mbuf **mp) 3356 { 3357 struct mbuf *m = *mp; 3358 struct ip6_hdr *ip6; 3359 3360 /* 3361 * If the IPv6 header is not aligned, slurp it up into a new 3362 * mbuf with space for link headers, in the event we forward 3363 * it. Otherwise, if it is aligned, make sure the entire base 3364 * IPv6 header is in the first mbuf of the chain. 3365 */ 3366 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3367 struct ifnet *inifp = m->m_pkthdr.rcvif; 3368 if ((m = m_copyup(m, sizeof(struct ip6_hdr), 3369 (max_linkhdr + 3) & ~3)) == NULL) { 3370 /* XXXJRT new stat, please */ 3371 V_ip6stat.ip6s_toosmall++; 3372 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3373 goto bad; 3374 } 3375 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) { 3376 struct ifnet *inifp = m->m_pkthdr.rcvif; 3377 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { 3378 V_ip6stat.ip6s_toosmall++; 3379 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3380 goto bad; 3381 } 3382 } 3383 3384 ip6 = mtod(m, struct ip6_hdr *); 3385 3386 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { 3387 V_ip6stat.ip6s_badvers++; 3388 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); 3389 goto bad; 3390 } 3391 3392 /* Checks out, proceed */ 3393 *mp = m; 3394 return (0); 3395 3396 bad: 3397 *mp = m; 3398 return (-1); 3399 } 3400 #endif /* INET6 */ 3401 3402 /* 3403 * bridge_fragment: 3404 * 3405 * Return a fragmented mbuf chain. 3406 */ 3407 static int 3408 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh, 3409 int snap, struct llc *llc) 3410 { 3411 struct mbuf *m0; 3412 struct ip *ip; 3413 int error = -1; 3414 3415 if (m->m_len < sizeof(struct ip) && 3416 (m = m_pullup(m, sizeof(struct ip))) == NULL) 3417 goto out; 3418 ip = mtod(m, struct ip *); 3419 3420 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist, 3421 CSUM_DELAY_IP); 3422 if (error) 3423 goto out; 3424 3425 /* walk the chain and re-add the Ethernet header */ 3426 for (m0 = m; m0; m0 = m0->m_nextpkt) { 3427 if (error == 0) { 3428 if (snap) { 3429 M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT); 3430 if (m0 == NULL) { 3431 error = ENOBUFS; 3432 continue; 3433 } 3434 bcopy(llc, mtod(m0, caddr_t), 3435 sizeof(struct llc)); 3436 } 3437 M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT); 3438 if (m0 == NULL) { 3439 error = ENOBUFS; 3440 continue; 3441 } 3442 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN); 3443 } else 3444 m_freem(m); 3445 } 3446 3447 if (error == 0) 3448 KMOD_IPSTAT_INC(ips_fragmented); 3449 3450 return (error); 3451 3452 out: 3453 if (m != NULL) 3454 m_freem(m); 3455 return (error); 3456 } 3457