1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net) 40 * All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 53 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 54 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 55 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 56 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 57 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 59 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 60 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 61 * POSSIBILITY OF SUCH DAMAGE. 62 * 63 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp 64 */ 65 66 /* 67 * Network interface bridge support. 68 * 69 * TODO: 70 * 71 * - Currently only supports Ethernet-like interfaces (Ethernet, 72 * 802.11, VLANs on Ethernet, etc.) 
 *	  Figure out a nice way
 *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
 *	  consider heterogeneous bridges).
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/socket.h>		/* for net/if.h */
#include <sys/sockio.h>
#include <sys/ctype.h>		/* string functions */
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <vm/uma.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/pfil.h>
#include <net/vnet.h>

#include <netinet/in.h>		/* for struct arpcom */
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#if defined(INET) || defined(INET6)
#include <netinet/ip_carp.h>
#endif
#include <machine/in_cksum.h>
#include <netinet/if_ether.h>	/* for struct arpcom */
#include <net/bridgestp.h>
#include <net/if_bridgevar.h>
#include <net/if_llc.h>
#include <net/if_vlan_var.h>

#include <net/route.h>
#include <netinet/ip_fw.h>
#include <netinet/ipfw/ip_fw_private.h>

/*
 * Size of the route hash table.  Must be a power of two.
 */
#ifndef BRIDGE_RTHASH_SIZE
#define	BRIDGE_RTHASH_SIZE		1024
#endif

#define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)

/*
 * Maximum number of addresses to cache.
 */
#ifndef BRIDGE_RTABLE_MAX
#define	BRIDGE_RTABLE_MAX		100
#endif

/*
 * Timeout (in seconds) for entries learned dynamically.
 */
#ifndef BRIDGE_RTABLE_TIMEOUT
#define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
#endif

/*
 * Number of seconds between walks of the route list.
 */
#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
#define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
#endif

/*
 * List of capabilities to possibly mask on the member interface.
 */
#define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)

/*
 * List of capabilities to strip.
 */
#define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO

/*
 * Bridge interface list entry.
 */
struct bridge_iflist {
	LIST_ENTRY(bridge_iflist) bif_next;
	struct ifnet		*bif_ifp;	/* member if */
	struct bstp_port	bif_stp;	/* STP state */
	uint32_t		bif_flags;	/* member if flags */
	int			bif_savedcaps;	/* saved capabilities */
	uint32_t		bif_addrmax;	/* max # of addresses */
	uint32_t		bif_addrcnt;	/* cur. # of addresses */
	uint32_t		bif_addrexceeded; /* # of address violations */
};

/*
 * Bridge route node.
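 *
 * One node is kept per learned or statically configured (address, vlan)
 * pair.  A node is linked both into a hash chain (brt_hash) and onto the
 * per-bridge list (brt_list) used for aging and for enumeration by the
 * ioctl handlers; the hash bucket is presumably selected by masking the
 * computed hash with BRIDGE_RTHASH_MASK, which is why BRIDGE_RTHASH_SIZE
 * must be a power of two.  Dynamically learned entries expire once
 * time_uptime passes brt_expire.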
192 */ 193 struct bridge_rtnode { 194 LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */ 195 LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */ 196 struct bridge_iflist *brt_dst; /* destination if */ 197 unsigned long brt_expire; /* expiration time */ 198 uint8_t brt_flags; /* address flags */ 199 uint8_t brt_addr[ETHER_ADDR_LEN]; 200 uint16_t brt_vlan; /* vlan id */ 201 }; 202 #define brt_ifp brt_dst->bif_ifp 203 204 /* 205 * Software state for each bridge. 206 */ 207 struct bridge_softc { 208 struct ifnet *sc_ifp; /* make this an interface */ 209 LIST_ENTRY(bridge_softc) sc_list; 210 struct mtx sc_mtx; 211 struct cv sc_cv; 212 uint32_t sc_brtmax; /* max # of addresses */ 213 uint32_t sc_brtcnt; /* cur. # of addresses */ 214 uint32_t sc_brttimeout; /* rt timeout in seconds */ 215 struct callout sc_brcallout; /* bridge callout */ 216 uint32_t sc_iflist_ref; /* refcount for sc_iflist */ 217 uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */ 218 LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */ 219 LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */ 220 LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */ 221 uint32_t sc_rthash_key; /* key for hash */ 222 LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */ 223 struct bstp_state sc_stp; /* STP state */ 224 uint32_t sc_brtexceeded; /* # of cache drops */ 225 struct ifnet *sc_ifaddr; /* member mac copied from */ 226 u_char sc_defaddr[6]; /* Default MAC address */ 227 }; 228 229 static struct mtx bridge_list_mtx; 230 eventhandler_tag bridge_detach_cookie = NULL; 231 232 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; 233 234 uma_zone_t bridge_rtnode_zone; 235 236 static int bridge_clone_create(struct if_clone *, int, caddr_t); 237 static void bridge_clone_destroy(struct ifnet *); 238 239 static int bridge_ioctl(struct ifnet *, u_long, caddr_t); 240 static void bridge_mutecaps(struct bridge_softc *); 241 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *, 242 int); 243 static void bridge_ifdetach(void *arg __unused, struct ifnet *); 244 static void bridge_init(void *); 245 static void bridge_dummynet(struct mbuf *, struct ifnet *); 246 static void bridge_stop(struct ifnet *, int); 247 static void bridge_start(struct ifnet *); 248 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *); 249 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *, 250 struct rtentry *); 251 static void bridge_enqueue(struct bridge_softc *, struct ifnet *, 252 struct mbuf *); 253 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int); 254 255 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *, 256 struct mbuf *m); 257 258 static void bridge_timer(void *); 259 260 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, 261 struct mbuf *, int); 262 static void bridge_span(struct bridge_softc *, struct mbuf *); 263 264 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, 265 uint16_t, struct bridge_iflist *, int, uint8_t); 266 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *, 267 uint16_t); 268 static void bridge_rttrim(struct bridge_softc *); 269 static void bridge_rtage(struct bridge_softc *); 270 static void bridge_rtflush(struct bridge_softc *, int); 271 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *, 272 uint16_t); 273 274 static int bridge_rtable_init(struct bridge_softc *); 275 static void bridge_rtable_fini(struct 
bridge_softc *); 276 277 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *); 278 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *, 279 const uint8_t *, uint16_t); 280 static int bridge_rtnode_insert(struct bridge_softc *, 281 struct bridge_rtnode *); 282 static void bridge_rtnode_destroy(struct bridge_softc *, 283 struct bridge_rtnode *); 284 static void bridge_rtable_expire(struct ifnet *, int); 285 static void bridge_state_change(struct ifnet *, int); 286 287 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *, 288 const char *name); 289 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *, 290 struct ifnet *ifp); 291 static void bridge_delete_member(struct bridge_softc *, 292 struct bridge_iflist *, int); 293 static void bridge_delete_span(struct bridge_softc *, 294 struct bridge_iflist *); 295 296 static int bridge_ioctl_add(struct bridge_softc *, void *); 297 static int bridge_ioctl_del(struct bridge_softc *, void *); 298 static int bridge_ioctl_gifflags(struct bridge_softc *, void *); 299 static int bridge_ioctl_sifflags(struct bridge_softc *, void *); 300 static int bridge_ioctl_scache(struct bridge_softc *, void *); 301 static int bridge_ioctl_gcache(struct bridge_softc *, void *); 302 static int bridge_ioctl_gifs(struct bridge_softc *, void *); 303 static int bridge_ioctl_rts(struct bridge_softc *, void *); 304 static int bridge_ioctl_saddr(struct bridge_softc *, void *); 305 static int bridge_ioctl_sto(struct bridge_softc *, void *); 306 static int bridge_ioctl_gto(struct bridge_softc *, void *); 307 static int bridge_ioctl_daddr(struct bridge_softc *, void *); 308 static int bridge_ioctl_flush(struct bridge_softc *, void *); 309 static int bridge_ioctl_gpri(struct bridge_softc *, void *); 310 static int bridge_ioctl_spri(struct bridge_softc *, void *); 311 static int bridge_ioctl_ght(struct bridge_softc *, void *); 312 static int bridge_ioctl_sht(struct bridge_softc *, void *); 313 static int bridge_ioctl_gfd(struct bridge_softc *, void *); 314 static int bridge_ioctl_sfd(struct bridge_softc *, void *); 315 static int bridge_ioctl_gma(struct bridge_softc *, void *); 316 static int bridge_ioctl_sma(struct bridge_softc *, void *); 317 static int bridge_ioctl_sifprio(struct bridge_softc *, void *); 318 static int bridge_ioctl_sifcost(struct bridge_softc *, void *); 319 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *); 320 static int bridge_ioctl_addspan(struct bridge_softc *, void *); 321 static int bridge_ioctl_delspan(struct bridge_softc *, void *); 322 static int bridge_ioctl_gbparam(struct bridge_softc *, void *); 323 static int bridge_ioctl_grte(struct bridge_softc *, void *); 324 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *); 325 static int bridge_ioctl_sproto(struct bridge_softc *, void *); 326 static int bridge_ioctl_stxhc(struct bridge_softc *, void *); 327 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, 328 int); 329 static int bridge_ip_checkbasic(struct mbuf **mp); 330 #ifdef INET6 331 static int bridge_ip6_checkbasic(struct mbuf **mp); 332 #endif /* INET6 */ 333 static int bridge_fragment(struct ifnet *, struct mbuf *, 334 struct ether_header *, int, struct llc *); 335 336 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */ 337 #define VLANTAGOF(_m) \ 338 (_m->m_flags & M_VLANTAG) ? 
EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1 339 340 static struct bstp_cb_ops bridge_ops = { 341 .bcb_state = bridge_state_change, 342 .bcb_rtage = bridge_rtable_expire 343 }; 344 345 SYSCTL_DECL(_net_link); 346 SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge"); 347 348 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */ 349 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */ 350 static int pfil_member = 1; /* run pfil hooks on the member interface */ 351 static int pfil_ipfw = 0; /* layer2 filter with ipfw */ 352 static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */ 353 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for 354 locally destined packets */ 355 static int log_stp = 0; /* log STP state changes */ 356 static int bridge_inherit_mac = 0; /* share MAC with first bridge member */ 357 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW, 358 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled"); 359 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW, 360 &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2"); 361 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW, 362 &pfil_bridge, 0, "Packet filter on the bridge interface"); 363 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW, 364 &pfil_member, 0, "Packet filter on the member interface"); 365 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW, 366 &pfil_local_phys, 0, 367 "Packet filter on the physical interface for locally destined packets"); 368 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW, 369 &log_stp, 0, "Log STP state changes"); 370 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, CTLFLAG_RW, 371 &bridge_inherit_mac, 0, 372 "Inherit MAC address from the first bridge member"); 373 374 struct bridge_control { 375 int (*bc_func)(struct bridge_softc *, void *); 376 int bc_argsize; 377 int bc_flags; 378 }; 379 380 #define BC_F_COPYIN 0x01 /* copy arguments in */ 381 #define BC_F_COPYOUT 0x02 /* copy arguments out */ 382 #define BC_F_SUSER 0x04 /* do super-user check */ 383 384 const struct bridge_control bridge_control_table[] = { 385 { bridge_ioctl_add, sizeof(struct ifbreq), 386 BC_F_COPYIN|BC_F_SUSER }, 387 { bridge_ioctl_del, sizeof(struct ifbreq), 388 BC_F_COPYIN|BC_F_SUSER }, 389 390 { bridge_ioctl_gifflags, sizeof(struct ifbreq), 391 BC_F_COPYIN|BC_F_COPYOUT }, 392 { bridge_ioctl_sifflags, sizeof(struct ifbreq), 393 BC_F_COPYIN|BC_F_SUSER }, 394 395 { bridge_ioctl_scache, sizeof(struct ifbrparam), 396 BC_F_COPYIN|BC_F_SUSER }, 397 { bridge_ioctl_gcache, sizeof(struct ifbrparam), 398 BC_F_COPYOUT }, 399 400 { bridge_ioctl_gifs, sizeof(struct ifbifconf), 401 BC_F_COPYIN|BC_F_COPYOUT }, 402 { bridge_ioctl_rts, sizeof(struct ifbaconf), 403 BC_F_COPYIN|BC_F_COPYOUT }, 404 405 { bridge_ioctl_saddr, sizeof(struct ifbareq), 406 BC_F_COPYIN|BC_F_SUSER }, 407 408 { bridge_ioctl_sto, sizeof(struct ifbrparam), 409 BC_F_COPYIN|BC_F_SUSER }, 410 { bridge_ioctl_gto, sizeof(struct ifbrparam), 411 BC_F_COPYOUT }, 412 413 { bridge_ioctl_daddr, sizeof(struct ifbareq), 414 BC_F_COPYIN|BC_F_SUSER }, 415 416 { bridge_ioctl_flush, sizeof(struct ifbreq), 417 BC_F_COPYIN|BC_F_SUSER }, 418 419 { bridge_ioctl_gpri, sizeof(struct ifbrparam), 420 BC_F_COPYOUT }, 421 { bridge_ioctl_spri, sizeof(struct ifbrparam), 422 BC_F_COPYIN|BC_F_SUSER }, 423 424 { bridge_ioctl_ght, sizeof(struct ifbrparam), 425 BC_F_COPYOUT }, 426 { bridge_ioctl_sht, sizeof(struct ifbrparam), 
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_gfd,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sfd,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_gma,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },
	{ bridge_ioctl_sma,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_sifprio,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_sifcost,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_addspan,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },
	{ bridge_ioctl_delspan,		sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_gbparam,		sizeof(struct ifbropreq),
	  BC_F_COPYOUT },

	{ bridge_ioctl_grte,		sizeof(struct ifbrparam),
	  BC_F_COPYOUT },

	{ bridge_ioctl_gifsstp,		sizeof(struct ifbpstpconf),
	  BC_F_COPYIN|BC_F_COPYOUT },

	{ bridge_ioctl_sproto,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_stxhc,		sizeof(struct ifbrparam),
	  BC_F_COPYIN|BC_F_SUSER },

	{ bridge_ioctl_sifmaxaddr,	sizeof(struct ifbreq),
	  BC_F_COPYIN|BC_F_SUSER },

};
const int bridge_control_table_size =
    sizeof(bridge_control_table) / sizeof(bridge_control_table[0]);

LIST_HEAD(, bridge_softc) bridge_list;

IFC_SIMPLE_DECLARE(bridge, 0);

static int
bridge_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF);
		if_clone_attach(&bridge_cloner);
		bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
		    sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		LIST_INIT(&bridge_list);
		bridge_input_p = bridge_input;
		bridge_output_p = bridge_output;
		bridge_dn_p = bridge_dummynet;
		bridge_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, bridge_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    bridge_detach_cookie);
		if_clone_detach(&bridge_cloner);
		uma_zdestroy(bridge_rtnode_zone);
		bridge_input_p = NULL;
		bridge_output_p = NULL;
		bridge_dn_p = NULL;
		mtx_destroy(&bridge_list_mtx);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t bridge_mod = {
	"if_bridge",
	bridge_modevent,
	0
};

DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);

/*
 * Handler for net.link.bridge.pfil_ipfw.
 */
static int
sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
{
	int enable = pfil_ipfw;
	int error;

	error = sysctl_handle_int(oidp, &enable, 0, req);
	enable = (enable) ? 1 : 0;

	if (enable != pfil_ipfw) {
		pfil_ipfw = enable;

		/*
		 * Disable pfil so that ipfw doesn't run twice; if the user
		 * really wants both then they can re-enable pfil_bridge
		 * and/or pfil_member.  Also allow non-IP packets as ipfw
		 * can filter by layer2 type.
		 */
		if (pfil_ipfw) {
			pfil_onlyip = 0;
			pfil_bridge = 0;
			pfil_member = 0;
		}
	}

	return (error);
}
SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");

/*
 * bridge_clone_create:
 *
 *	Create a new bridge instance.
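 *
 *	Allocates the softc and its ifnet, initializes the route table and
 *	the aging callout, and picks a random locally administered MAC
 *	address, retrying until it does not collide with any bridge already
 *	on bridge_list.  The interface is then attached with
 *	ether_ifattach() and its STP state with bstp_attach().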
557 */ 558 static int 559 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params) 560 { 561 struct bridge_softc *sc, *sc2; 562 struct ifnet *bifp, *ifp; 563 int retry; 564 565 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO); 566 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 567 if (ifp == NULL) { 568 free(sc, M_DEVBUF); 569 return (ENOSPC); 570 } 571 572 BRIDGE_LOCK_INIT(sc); 573 sc->sc_brtmax = BRIDGE_RTABLE_MAX; 574 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT; 575 576 /* Initialize our routing table. */ 577 bridge_rtable_init(sc); 578 579 callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0); 580 581 LIST_INIT(&sc->sc_iflist); 582 LIST_INIT(&sc->sc_spanlist); 583 584 ifp->if_softc = sc; 585 if_initname(ifp, ifc->ifc_name, unit); 586 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 587 ifp->if_ioctl = bridge_ioctl; 588 ifp->if_start = bridge_start; 589 ifp->if_init = bridge_init; 590 ifp->if_type = IFT_BRIDGE; 591 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 592 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 593 IFQ_SET_READY(&ifp->if_snd); 594 595 /* 596 * Generate a random ethernet address with a locally administered 597 * address. 598 * 599 * Since we are using random ethernet addresses for the bridge, it is 600 * possible that we might have address collisions, so make sure that 601 * this hardware address isn't already in use on another bridge. 602 */ 603 for (retry = 1; retry != 0;) { 604 arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1); 605 sc->sc_defaddr[0] &= ~1; /* clear multicast bit */ 606 sc->sc_defaddr[0] |= 2; /* set the LAA bit */ 607 retry = 0; 608 mtx_lock(&bridge_list_mtx); 609 LIST_FOREACH(sc2, &bridge_list, sc_list) { 610 bifp = sc2->sc_ifp; 611 if (memcmp(sc->sc_defaddr, 612 IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) 613 retry = 1; 614 } 615 mtx_unlock(&bridge_list_mtx); 616 } 617 618 bstp_attach(&sc->sc_stp, &bridge_ops); 619 ether_ifattach(ifp, sc->sc_defaddr); 620 /* Now undo some of the damage... */ 621 ifp->if_baudrate = 0; 622 ifp->if_type = IFT_BRIDGE; 623 624 mtx_lock(&bridge_list_mtx); 625 LIST_INSERT_HEAD(&bridge_list, sc, sc_list); 626 mtx_unlock(&bridge_list_mtx); 627 628 return (0); 629 } 630 631 /* 632 * bridge_clone_destroy: 633 * 634 * Destroy a bridge instance. 635 */ 636 static void 637 bridge_clone_destroy(struct ifnet *ifp) 638 { 639 struct bridge_softc *sc = ifp->if_softc; 640 struct bridge_iflist *bif; 641 642 BRIDGE_LOCK(sc); 643 644 bridge_stop(ifp, 1); 645 ifp->if_flags &= ~IFF_UP; 646 647 while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL) 648 bridge_delete_member(sc, bif, 0); 649 650 while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) { 651 bridge_delete_span(sc, bif); 652 } 653 654 BRIDGE_UNLOCK(sc); 655 656 callout_drain(&sc->sc_brcallout); 657 658 mtx_lock(&bridge_list_mtx); 659 LIST_REMOVE(sc, sc_list); 660 mtx_unlock(&bridge_list_mtx); 661 662 bstp_detach(&sc->sc_stp); 663 ether_ifdetach(ifp); 664 if_free_type(ifp, IFT_ETHER); 665 666 /* Tear down the routing table. */ 667 bridge_rtable_fini(sc); 668 669 BRIDGE_LOCK_DESTROY(sc); 670 free(sc, M_DEVBUF); 671 } 672 673 /* 674 * bridge_ioctl: 675 * 676 * Handle a control request from the operator. 
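 *
 *	SIOCGDRVSPEC/SIOCSDRVSPEC requests are dispatched through
 *	bridge_control_table, indexed by ifd_cmd; arguments are copied in
 *	and/or out and a privilege check is performed according to the
 *	entry's BC_F_COPYIN, BC_F_COPYOUT and BC_F_SUSER flags.
 *	SIOCSIFFLAGS and SIOCSIFMTU are handled inline, and everything
 *	else falls through to ether_ioctl().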
677 */ 678 static int 679 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 680 { 681 struct bridge_softc *sc = ifp->if_softc; 682 struct ifreq *ifr = (struct ifreq *)data; 683 struct bridge_iflist *bif; 684 struct thread *td = curthread; 685 union { 686 struct ifbreq ifbreq; 687 struct ifbifconf ifbifconf; 688 struct ifbareq ifbareq; 689 struct ifbaconf ifbaconf; 690 struct ifbrparam ifbrparam; 691 struct ifbropreq ifbropreq; 692 } args; 693 struct ifdrv *ifd = (struct ifdrv *) data; 694 const struct bridge_control *bc; 695 int error = 0; 696 697 switch (cmd) { 698 699 case SIOCADDMULTI: 700 case SIOCDELMULTI: 701 break; 702 703 case SIOCGDRVSPEC: 704 case SIOCSDRVSPEC: 705 if (ifd->ifd_cmd >= bridge_control_table_size) { 706 error = EINVAL; 707 break; 708 } 709 bc = &bridge_control_table[ifd->ifd_cmd]; 710 711 if (cmd == SIOCGDRVSPEC && 712 (bc->bc_flags & BC_F_COPYOUT) == 0) { 713 error = EINVAL; 714 break; 715 } 716 else if (cmd == SIOCSDRVSPEC && 717 (bc->bc_flags & BC_F_COPYOUT) != 0) { 718 error = EINVAL; 719 break; 720 } 721 722 if (bc->bc_flags & BC_F_SUSER) { 723 error = priv_check(td, PRIV_NET_BRIDGE); 724 if (error) 725 break; 726 } 727 728 if (ifd->ifd_len != bc->bc_argsize || 729 ifd->ifd_len > sizeof(args)) { 730 error = EINVAL; 731 break; 732 } 733 734 bzero(&args, sizeof(args)); 735 if (bc->bc_flags & BC_F_COPYIN) { 736 error = copyin(ifd->ifd_data, &args, ifd->ifd_len); 737 if (error) 738 break; 739 } 740 741 BRIDGE_LOCK(sc); 742 error = (*bc->bc_func)(sc, &args); 743 BRIDGE_UNLOCK(sc); 744 if (error) 745 break; 746 747 if (bc->bc_flags & BC_F_COPYOUT) 748 error = copyout(&args, ifd->ifd_data, ifd->ifd_len); 749 750 break; 751 752 case SIOCSIFFLAGS: 753 if (!(ifp->if_flags & IFF_UP) && 754 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 755 /* 756 * If interface is marked down and it is running, 757 * then stop and disable it. 758 */ 759 BRIDGE_LOCK(sc); 760 bridge_stop(ifp, 1); 761 BRIDGE_UNLOCK(sc); 762 } else if ((ifp->if_flags & IFF_UP) && 763 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 764 /* 765 * If interface is marked up and it is stopped, then 766 * start it. 767 */ 768 (*ifp->if_init)(sc); 769 } 770 break; 771 772 case SIOCSIFMTU: 773 if (ifr->ifr_mtu < 576) { 774 error = EINVAL; 775 break; 776 } 777 if (LIST_EMPTY(&sc->sc_iflist)) { 778 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 779 break; 780 } 781 BRIDGE_LOCK(sc); 782 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 783 if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) { 784 log(LOG_NOTICE, "%s: invalid MTU: %lu(%s)" 785 " != %d\n", sc->sc_ifp->if_xname, 786 bif->bif_ifp->if_mtu, 787 bif->bif_ifp->if_xname, ifr->ifr_mtu); 788 error = EINVAL; 789 break; 790 } 791 } 792 if (!error) 793 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 794 BRIDGE_UNLOCK(sc); 795 break; 796 default: 797 /* 798 * drop the lock as ether_ioctl() will call bridge_start() and 799 * cause the lock to be recursed. 
		 */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * bridge_mutecaps:
 *
 *	Clear or restore unwanted capabilities on the member interface.
 */
static void
bridge_mutecaps(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	int enabled, mask;

	/* Initial bitmask of capabilities to test */
	mask = BRIDGE_IFCAPS_MASK;

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		/* Every member must support it or it's disabled */
		mask &= bif->bif_savedcaps;
	}

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		enabled = bif->bif_ifp->if_capenable;
		enabled &= ~BRIDGE_IFCAPS_STRIP;
		/* strip off mask bits and enable them again if allowed */
		enabled &= ~BRIDGE_IFCAPS_MASK;
		enabled |= mask;
		bridge_set_ifcap(sc, bif, enabled);
	}

}

static void
bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
{
	struct ifnet *ifp = bif->bif_ifp;
	struct ifreq ifr;
	int error;

	bzero(&ifr, sizeof(ifr));
	ifr.ifr_reqcap = set;

	if (ifp->if_capenable != set) {
		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
		if (error)
			if_printf(sc->sc_ifp,
			    "error setting interface capabilities on %s\n",
			    ifp->if_xname);
	}
}

/*
 * bridge_lookup_member:
 *
 *	Lookup a bridge member interface.
 */
static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
{
	struct bridge_iflist *bif;
	struct ifnet *ifp;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifp = bif->bif_ifp;
		if (strcmp(ifp->if_xname, name) == 0)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_lookup_member_if:
 *
 *	Lookup a bridge member interface by ifnet*.
 */
static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
{
	struct bridge_iflist *bif;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_ifp == member_ifp)
			return (bif);
	}

	return (NULL);
}

/*
 * bridge_delete_member:
 *
 *	Delete the specified member interface.
 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
    int gone)
{
	struct ifnet *ifs = bif->bif_ifp;
	struct ifnet *fif = NULL;

	BRIDGE_LOCK_ASSERT(sc);

	if (bif->bif_flags & IFBIF_STP)
		bstp_disable(&bif->bif_stp);

	ifs->if_bridge = NULL;
	BRIDGE_XLOCK(sc);
	LIST_REMOVE(bif, bif_next);
	BRIDGE_XDROP(sc);

	/*
	 * If removing the interface that gave the bridge its mac address, set
	 * the mac address of the bridge to the address of the next member, or
	 * to its default address if no members are left.
	 */
	if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
		if (LIST_EMPTY(&sc->sc_iflist)) {
			bcopy(sc->sc_defaddr,
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = NULL;
		} else {
			fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp;
			bcopy(IF_LLADDR(fif),
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = fif;
		}
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	}

	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
	KASSERT(bif->bif_addrcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

	BRIDGE_UNLOCK(sc);
	if (!gone) {
		switch (ifs->if_type) {
		case IFT_ETHER:
		case IFT_L2VLAN:
			/*
			 * Take the interface out of promiscuous mode.
			 */
			(void) ifpromisc(ifs, 0);
			break;

		case IFT_GIF:
			break;

		default:
#ifdef DIAGNOSTIC
			panic("bridge_delete_member: impossible");
#endif
			break;
		}
		/* re-enable any interface capabilities */
		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
	}
	bstp_destroy(&bif->bif_stp);	/* prepare to free */
	BRIDGE_LOCK(sc);
	free(bif, M_DEVBUF);
}

/*
 * bridge_delete_span:
 *
 *	Delete the specified span interface.
 */
static void
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
{
	BRIDGE_LOCK_ASSERT(sc);

	KASSERT(bif->bif_ifp->if_bridge == NULL,
	    ("%s: not a span interface", __func__));

	LIST_REMOVE(bif, bif_next);
	free(bif, M_DEVBUF);
}

static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs;
	int error = 0;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);
	if (ifs->if_ioctl == NULL)	/* must be supported */
		return (EINVAL);

	/* If it's in the span list, it can't be a member. */
	LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
		if (ifs == bif->bif_ifp)
			return (EBUSY);

	if (ifs->if_bridge == sc)
		return (EEXIST);

	if (ifs->if_bridge != NULL)
		return (EBUSY);

	bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (bif == NULL)
		return (ENOMEM);

	bif->bif_ifp = ifs;
	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
	bif->bif_savedcaps = ifs->if_capenable;

	switch (ifs->if_type) {
	case IFT_ETHER:
	case IFT_L2VLAN:
	case IFT_GIF:
		/* permitted interface types */
		break;
	default:
		error = EINVAL;
		goto out;
	}

	/* Allow the first Ethernet member to define the MTU */
	if (LIST_EMPTY(&sc->sc_iflist))
		sc->sc_ifp->if_mtu = ifs->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
		if_printf(sc->sc_ifp, "invalid MTU: %lu(%s) != %lu\n",
		    ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu);
		error = EINVAL;
		goto out;
	}

	/*
	 * Assign the interface's MAC address to the bridge if it's the first
	 * member and the MAC address of the bridge has not been changed from
	 * the default randomly generated one.
1048 */ 1049 if (bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) && 1050 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) { 1051 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 1052 sc->sc_ifaddr = ifs; 1053 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); 1054 } 1055 1056 ifs->if_bridge = sc; 1057 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp); 1058 /* 1059 * XXX: XLOCK HERE!?! 1060 * 1061 * NOTE: insert_***HEAD*** should be safe for the traversals. 1062 */ 1063 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next); 1064 1065 /* Set interface capabilities to the intersection set of all members */ 1066 bridge_mutecaps(sc); 1067 1068 switch (ifs->if_type) { 1069 case IFT_ETHER: 1070 case IFT_L2VLAN: 1071 /* 1072 * Place the interface into promiscuous mode. 1073 */ 1074 BRIDGE_UNLOCK(sc); 1075 error = ifpromisc(ifs, 1); 1076 BRIDGE_LOCK(sc); 1077 break; 1078 } 1079 if (error) 1080 bridge_delete_member(sc, bif, 0); 1081 out: 1082 if (error) { 1083 if (bif != NULL) 1084 free(bif, M_DEVBUF); 1085 } 1086 return (error); 1087 } 1088 1089 static int 1090 bridge_ioctl_del(struct bridge_softc *sc, void *arg) 1091 { 1092 struct ifbreq *req = arg; 1093 struct bridge_iflist *bif; 1094 1095 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1096 if (bif == NULL) 1097 return (ENOENT); 1098 1099 bridge_delete_member(sc, bif, 0); 1100 1101 return (0); 1102 } 1103 1104 static int 1105 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) 1106 { 1107 struct ifbreq *req = arg; 1108 struct bridge_iflist *bif; 1109 struct bstp_port *bp; 1110 1111 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1112 if (bif == NULL) 1113 return (ENOENT); 1114 1115 bp = &bif->bif_stp; 1116 req->ifbr_ifsflags = bif->bif_flags; 1117 req->ifbr_state = bp->bp_state; 1118 req->ifbr_priority = bp->bp_priority; 1119 req->ifbr_path_cost = bp->bp_path_cost; 1120 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1121 req->ifbr_proto = bp->bp_protover; 1122 req->ifbr_role = bp->bp_role; 1123 req->ifbr_stpflags = bp->bp_flags; 1124 req->ifbr_addrcnt = bif->bif_addrcnt; 1125 req->ifbr_addrmax = bif->bif_addrmax; 1126 req->ifbr_addrexceeded = bif->bif_addrexceeded; 1127 1128 /* Copy STP state options as flags */ 1129 if (bp->bp_operedge) 1130 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE; 1131 if (bp->bp_flags & BSTP_PORT_AUTOEDGE) 1132 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE; 1133 if (bp->bp_ptp_link) 1134 req->ifbr_ifsflags |= IFBIF_BSTP_PTP; 1135 if (bp->bp_flags & BSTP_PORT_AUTOPTP) 1136 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP; 1137 if (bp->bp_flags & BSTP_PORT_ADMEDGE) 1138 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE; 1139 if (bp->bp_flags & BSTP_PORT_ADMCOST) 1140 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST; 1141 return (0); 1142 } 1143 1144 static int 1145 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg) 1146 { 1147 struct ifbreq *req = arg; 1148 struct bridge_iflist *bif; 1149 struct bstp_port *bp; 1150 int error; 1151 1152 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1153 if (bif == NULL) 1154 return (ENOENT); 1155 bp = &bif->bif_stp; 1156 1157 if (req->ifbr_ifsflags & IFBIF_SPAN) 1158 /* SPAN is readonly */ 1159 return (EINVAL); 1160 1161 if (req->ifbr_ifsflags & IFBIF_STP) { 1162 if ((bif->bif_flags & IFBIF_STP) == 0) { 1163 error = bstp_enable(&bif->bif_stp); 1164 if (error) 1165 return (error); 1166 } 1167 } else { 1168 if ((bif->bif_flags & IFBIF_STP) != 0) 1169 bstp_disable(&bif->bif_stp); 1170 } 1171 1172 /* Pass on STP flags */ 1173 bstp_set_edge(bp, req->ifbr_ifsflags & 
IFBIF_BSTP_EDGE ? 1 : 0); 1174 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0); 1175 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0); 1176 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0); 1177 1178 /* Save the bits relating to the bridge */ 1179 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK; 1180 1181 return (0); 1182 } 1183 1184 static int 1185 bridge_ioctl_scache(struct bridge_softc *sc, void *arg) 1186 { 1187 struct ifbrparam *param = arg; 1188 1189 sc->sc_brtmax = param->ifbrp_csize; 1190 bridge_rttrim(sc); 1191 1192 return (0); 1193 } 1194 1195 static int 1196 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg) 1197 { 1198 struct ifbrparam *param = arg; 1199 1200 param->ifbrp_csize = sc->sc_brtmax; 1201 1202 return (0); 1203 } 1204 1205 static int 1206 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg) 1207 { 1208 struct ifbifconf *bifc = arg; 1209 struct bridge_iflist *bif; 1210 struct ifbreq breq; 1211 char *buf, *outbuf; 1212 int count, buflen, len, error = 0; 1213 1214 count = 0; 1215 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) 1216 count++; 1217 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1218 count++; 1219 1220 buflen = sizeof(breq) * count; 1221 if (bifc->ifbic_len == 0) { 1222 bifc->ifbic_len = buflen; 1223 return (0); 1224 } 1225 BRIDGE_UNLOCK(sc); 1226 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1227 BRIDGE_LOCK(sc); 1228 1229 count = 0; 1230 buf = outbuf; 1231 len = min(bifc->ifbic_len, buflen); 1232 bzero(&breq, sizeof(breq)); 1233 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1234 if (len < sizeof(breq)) 1235 break; 1236 1237 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1238 sizeof(breq.ifbr_ifsname)); 1239 /* Fill in the ifbreq structure */ 1240 error = bridge_ioctl_gifflags(sc, &breq); 1241 if (error) 1242 break; 1243 memcpy(buf, &breq, sizeof(breq)); 1244 count++; 1245 buf += sizeof(breq); 1246 len -= sizeof(breq); 1247 } 1248 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 1249 if (len < sizeof(breq)) 1250 break; 1251 1252 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1253 sizeof(breq.ifbr_ifsname)); 1254 breq.ifbr_ifsflags = bif->bif_flags; 1255 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1256 memcpy(buf, &breq, sizeof(breq)); 1257 count++; 1258 buf += sizeof(breq); 1259 len -= sizeof(breq); 1260 } 1261 1262 BRIDGE_UNLOCK(sc); 1263 bifc->ifbic_len = sizeof(breq) * count; 1264 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); 1265 BRIDGE_LOCK(sc); 1266 free(outbuf, M_TEMP); 1267 return (error); 1268 } 1269 1270 static int 1271 bridge_ioctl_rts(struct bridge_softc *sc, void *arg) 1272 { 1273 struct ifbaconf *bac = arg; 1274 struct bridge_rtnode *brt; 1275 struct ifbareq bareq; 1276 char *buf, *outbuf; 1277 int count, buflen, len, error = 0; 1278 1279 if (bac->ifbac_len == 0) 1280 return (0); 1281 1282 count = 0; 1283 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) 1284 count++; 1285 buflen = sizeof(bareq) * count; 1286 1287 BRIDGE_UNLOCK(sc); 1288 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1289 BRIDGE_LOCK(sc); 1290 1291 count = 0; 1292 buf = outbuf; 1293 len = min(bac->ifbac_len, buflen); 1294 bzero(&bareq, sizeof(bareq)); 1295 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 1296 if (len < sizeof(bareq)) 1297 goto out; 1298 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname, 1299 sizeof(bareq.ifba_ifsname)); 1300 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); 1301 bareq.ifba_vlan = brt->brt_vlan; 1302 if ((brt->brt_flags & IFBAF_TYPEMASK) == 
IFBAF_DYNAMIC && 1303 time_uptime < brt->brt_expire) 1304 bareq.ifba_expire = brt->brt_expire - time_uptime; 1305 else 1306 bareq.ifba_expire = 0; 1307 bareq.ifba_flags = brt->brt_flags; 1308 1309 memcpy(buf, &bareq, sizeof(bareq)); 1310 count++; 1311 buf += sizeof(bareq); 1312 len -= sizeof(bareq); 1313 } 1314 out: 1315 BRIDGE_UNLOCK(sc); 1316 bac->ifbac_len = sizeof(bareq) * count; 1317 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); 1318 BRIDGE_LOCK(sc); 1319 free(outbuf, M_TEMP); 1320 return (error); 1321 } 1322 1323 static int 1324 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg) 1325 { 1326 struct ifbareq *req = arg; 1327 struct bridge_iflist *bif; 1328 int error; 1329 1330 bif = bridge_lookup_member(sc, req->ifba_ifsname); 1331 if (bif == NULL) 1332 return (ENOENT); 1333 1334 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1, 1335 req->ifba_flags); 1336 1337 return (error); 1338 } 1339 1340 static int 1341 bridge_ioctl_sto(struct bridge_softc *sc, void *arg) 1342 { 1343 struct ifbrparam *param = arg; 1344 1345 sc->sc_brttimeout = param->ifbrp_ctime; 1346 return (0); 1347 } 1348 1349 static int 1350 bridge_ioctl_gto(struct bridge_softc *sc, void *arg) 1351 { 1352 struct ifbrparam *param = arg; 1353 1354 param->ifbrp_ctime = sc->sc_brttimeout; 1355 return (0); 1356 } 1357 1358 static int 1359 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg) 1360 { 1361 struct ifbareq *req = arg; 1362 1363 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan)); 1364 } 1365 1366 static int 1367 bridge_ioctl_flush(struct bridge_softc *sc, void *arg) 1368 { 1369 struct ifbreq *req = arg; 1370 1371 bridge_rtflush(sc, req->ifbr_ifsflags); 1372 return (0); 1373 } 1374 1375 static int 1376 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg) 1377 { 1378 struct ifbrparam *param = arg; 1379 struct bstp_state *bs = &sc->sc_stp; 1380 1381 param->ifbrp_prio = bs->bs_bridge_priority; 1382 return (0); 1383 } 1384 1385 static int 1386 bridge_ioctl_spri(struct bridge_softc *sc, void *arg) 1387 { 1388 struct ifbrparam *param = arg; 1389 1390 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio)); 1391 } 1392 1393 static int 1394 bridge_ioctl_ght(struct bridge_softc *sc, void *arg) 1395 { 1396 struct ifbrparam *param = arg; 1397 struct bstp_state *bs = &sc->sc_stp; 1398 1399 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8; 1400 return (0); 1401 } 1402 1403 static int 1404 bridge_ioctl_sht(struct bridge_softc *sc, void *arg) 1405 { 1406 struct ifbrparam *param = arg; 1407 1408 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime)); 1409 } 1410 1411 static int 1412 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg) 1413 { 1414 struct ifbrparam *param = arg; 1415 struct bstp_state *bs = &sc->sc_stp; 1416 1417 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8; 1418 return (0); 1419 } 1420 1421 static int 1422 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg) 1423 { 1424 struct ifbrparam *param = arg; 1425 1426 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay)); 1427 } 1428 1429 static int 1430 bridge_ioctl_gma(struct bridge_softc *sc, void *arg) 1431 { 1432 struct ifbrparam *param = arg; 1433 struct bstp_state *bs = &sc->sc_stp; 1434 1435 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8; 1436 return (0); 1437 } 1438 1439 static int 1440 bridge_ioctl_sma(struct bridge_softc *sc, void *arg) 1441 { 1442 struct ifbrparam *param = arg; 1443 1444 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage)); 1445 } 1446 1447 static int 1448 
bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg) 1449 { 1450 struct ifbreq *req = arg; 1451 struct bridge_iflist *bif; 1452 1453 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1454 if (bif == NULL) 1455 return (ENOENT); 1456 1457 return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority)); 1458 } 1459 1460 static int 1461 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg) 1462 { 1463 struct ifbreq *req = arg; 1464 struct bridge_iflist *bif; 1465 1466 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1467 if (bif == NULL) 1468 return (ENOENT); 1469 1470 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost)); 1471 } 1472 1473 static int 1474 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg) 1475 { 1476 struct ifbreq *req = arg; 1477 struct bridge_iflist *bif; 1478 1479 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1480 if (bif == NULL) 1481 return (ENOENT); 1482 1483 bif->bif_addrmax = req->ifbr_addrmax; 1484 return (0); 1485 } 1486 1487 static int 1488 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg) 1489 { 1490 struct ifbreq *req = arg; 1491 struct bridge_iflist *bif = NULL; 1492 struct ifnet *ifs; 1493 1494 ifs = ifunit(req->ifbr_ifsname); 1495 if (ifs == NULL) 1496 return (ENOENT); 1497 1498 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1499 if (ifs == bif->bif_ifp) 1500 return (EBUSY); 1501 1502 if (ifs->if_bridge != NULL) 1503 return (EBUSY); 1504 1505 switch (ifs->if_type) { 1506 case IFT_ETHER: 1507 case IFT_GIF: 1508 case IFT_L2VLAN: 1509 break; 1510 default: 1511 return (EINVAL); 1512 } 1513 1514 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1515 if (bif == NULL) 1516 return (ENOMEM); 1517 1518 bif->bif_ifp = ifs; 1519 bif->bif_flags = IFBIF_SPAN; 1520 1521 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next); 1522 1523 return (0); 1524 } 1525 1526 static int 1527 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg) 1528 { 1529 struct ifbreq *req = arg; 1530 struct bridge_iflist *bif; 1531 struct ifnet *ifs; 1532 1533 ifs = ifunit(req->ifbr_ifsname); 1534 if (ifs == NULL) 1535 return (ENOENT); 1536 1537 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1538 if (ifs == bif->bif_ifp) 1539 break; 1540 1541 if (bif == NULL) 1542 return (ENOENT); 1543 1544 bridge_delete_span(sc, bif); 1545 1546 return (0); 1547 } 1548 1549 static int 1550 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg) 1551 { 1552 struct ifbropreq *req = arg; 1553 struct bstp_state *bs = &sc->sc_stp; 1554 struct bstp_port *root_port; 1555 1556 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; 1557 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; 1558 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; 1559 1560 root_port = bs->bs_root_port; 1561 if (root_port == NULL) 1562 req->ifbop_root_port = 0; 1563 else 1564 req->ifbop_root_port = root_port->bp_ifp->if_index; 1565 1566 req->ifbop_holdcount = bs->bs_txholdcount; 1567 req->ifbop_priority = bs->bs_bridge_priority; 1568 req->ifbop_protocol = bs->bs_protover; 1569 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; 1570 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; 1571 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; 1572 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; 1573 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; 1574 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; 1575 1576 return (0); 1577 } 1578 1579 static int 1580 bridge_ioctl_grte(struct bridge_softc *sc, void *arg) 1581 { 1582 struct ifbrparam *param = arg; 1583 1584 
param->ifbrp_cexceeded = sc->sc_brtexceeded; 1585 return (0); 1586 } 1587 1588 static int 1589 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg) 1590 { 1591 struct ifbpstpconf *bifstp = arg; 1592 struct bridge_iflist *bif; 1593 struct bstp_port *bp; 1594 struct ifbpstpreq bpreq; 1595 char *buf, *outbuf; 1596 int count, buflen, len, error = 0; 1597 1598 count = 0; 1599 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1600 if ((bif->bif_flags & IFBIF_STP) != 0) 1601 count++; 1602 } 1603 1604 buflen = sizeof(bpreq) * count; 1605 if (bifstp->ifbpstp_len == 0) { 1606 bifstp->ifbpstp_len = buflen; 1607 return (0); 1608 } 1609 1610 BRIDGE_UNLOCK(sc); 1611 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1612 BRIDGE_LOCK(sc); 1613 1614 count = 0; 1615 buf = outbuf; 1616 len = min(bifstp->ifbpstp_len, buflen); 1617 bzero(&bpreq, sizeof(bpreq)); 1618 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1619 if (len < sizeof(bpreq)) 1620 break; 1621 1622 if ((bif->bif_flags & IFBIF_STP) == 0) 1623 continue; 1624 1625 bp = &bif->bif_stp; 1626 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; 1627 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; 1628 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; 1629 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; 1630 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; 1631 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; 1632 1633 memcpy(buf, &bpreq, sizeof(bpreq)); 1634 count++; 1635 buf += sizeof(bpreq); 1636 len -= sizeof(bpreq); 1637 } 1638 1639 BRIDGE_UNLOCK(sc); 1640 bifstp->ifbpstp_len = sizeof(bpreq) * count; 1641 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); 1642 BRIDGE_LOCK(sc); 1643 free(outbuf, M_TEMP); 1644 return (error); 1645 } 1646 1647 static int 1648 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg) 1649 { 1650 struct ifbrparam *param = arg; 1651 1652 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto)); 1653 } 1654 1655 static int 1656 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg) 1657 { 1658 struct ifbrparam *param = arg; 1659 1660 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc)); 1661 } 1662 1663 /* 1664 * bridge_ifdetach: 1665 * 1666 * Detach an interface from a bridge. Called when a member 1667 * interface is detaching. 1668 */ 1669 static void 1670 bridge_ifdetach(void *arg __unused, struct ifnet *ifp) 1671 { 1672 struct bridge_softc *sc = ifp->if_bridge; 1673 struct bridge_iflist *bif; 1674 1675 /* Check if the interface is a bridge member */ 1676 if (sc != NULL) { 1677 BRIDGE_LOCK(sc); 1678 1679 bif = bridge_lookup_member_if(sc, ifp); 1680 if (bif != NULL) 1681 bridge_delete_member(sc, bif, 1); 1682 1683 BRIDGE_UNLOCK(sc); 1684 return; 1685 } 1686 1687 /* Check if the interface is a span port */ 1688 mtx_lock(&bridge_list_mtx); 1689 LIST_FOREACH(sc, &bridge_list, sc_list) { 1690 BRIDGE_LOCK(sc); 1691 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1692 if (ifp == bif->bif_ifp) { 1693 bridge_delete_span(sc, bif); 1694 break; 1695 } 1696 1697 BRIDGE_UNLOCK(sc); 1698 } 1699 mtx_unlock(&bridge_list_mtx); 1700 } 1701 1702 /* 1703 * bridge_init: 1704 * 1705 * Initialize a bridge interface. 
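 *
 *	Marks the interface running, arms the periodic route-table pruning
 *	callout (bridge_timer) and initializes the spanning tree state via
 *	bstp_init().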
 */
static void
bridge_init(void *xsc)
{
	struct bridge_softc *sc = (struct bridge_softc *)xsc;
	struct ifnet *ifp = sc->sc_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	BRIDGE_LOCK(sc);
	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
	    bridge_timer, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */

	BRIDGE_UNLOCK(sc);
}

/*
 * bridge_stop:
 *
 *	Stop the bridge interface.
 */
static void
bridge_stop(struct ifnet *ifp, int disable)
{
	struct bridge_softc *sc = ifp->if_softc;

	BRIDGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	callout_stop(&sc->sc_brcallout);
	bstp_stop(&sc->sc_stp);

	bridge_rtflush(sc, IFBF_FLUSHDYN);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
}

/*
 * bridge_enqueue:
 *
 *	Enqueue a packet on a bridge member interface.
 *
 */
static void
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
{
	int len, err = 0;
	short mflags;
	struct mbuf *m0;

	len = m->m_pkthdr.len;
	mflags = m->m_flags;

	/* We may be sending a fragment so traverse the mbuf */
	for (; m; m = m0) {
		m0 = m->m_nextpkt;
		m->m_nextpkt = NULL;

		/*
		 * If the underlying interface cannot do VLAN tag insertion
		 * itself then attach a packet tag that holds it.
		 */
		if ((m->m_flags & M_VLANTAG) &&
		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
			if (m == NULL) {
				if_printf(dst_ifp,
				    "unable to prepend VLAN header\n");
				dst_ifp->if_oerrors++;
				continue;
			}
			m->m_flags &= ~M_VLANTAG;
		}

		if (err == 0)
			dst_ifp->if_transmit(dst_ifp, m);
	}

	if (err == 0) {
		sc->sc_ifp->if_opackets++;
		sc->sc_ifp->if_obytes += len;
		if (mflags & M_MCAST)
			sc->sc_ifp->if_omcasts++;
	}
}

/*
 * bridge_dummynet:
 *
 *	Receive a queued packet from dummynet and pass it on to the output
 *	interface.
 *
 *	The mbuf has the Ethernet header already attached.
 */
static void
bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
{
	struct bridge_softc *sc;

	sc = ifp->if_bridge;

	/*
	 * The packet didn't originate from a member interface.  This should
	 * only ever happen if a member interface is removed while packets
	 * are queued for it.
	 */
	if (sc == NULL) {
		m_freem(m);
		return;
	}

	if (PFIL_HOOKED(&V_inet_pfil_hook)
#ifdef INET6
	    || PFIL_HOOKED(&V_inet6_pfil_hook)
#endif
	    ) {
		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	bridge_enqueue(sc, ifp, m);
}

/*
 * bridge_output:
 *
 *	Send output from a bridge member interface.  This
 *	performs the bridging function for locally originated
 *	packets.
 *
 *	The mbuf has the Ethernet header already attached.  We must
 *	enqueue or free the mbuf before returning.
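 *
 *	A unicast destination that is found in the forwarding table is sent
 *	only to that member interface; multicast, broadcast and unknown
 *	unicast destinations are flooded to every running member, copying
 *	the mbuf for all but the last one.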
1846 */ 1847 static int 1848 bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, 1849 struct rtentry *rt) 1850 { 1851 struct ether_header *eh; 1852 struct ifnet *dst_if; 1853 struct bridge_softc *sc; 1854 uint16_t vlan; 1855 1856 if (m->m_len < ETHER_HDR_LEN) { 1857 m = m_pullup(m, ETHER_HDR_LEN); 1858 if (m == NULL) 1859 return (0); 1860 } 1861 1862 eh = mtod(m, struct ether_header *); 1863 sc = ifp->if_bridge; 1864 vlan = VLANTAGOF(m); 1865 1866 BRIDGE_LOCK(sc); 1867 1868 /* 1869 * If bridge is down, but the original output interface is up, 1870 * go ahead and send out that interface. Otherwise, the packet 1871 * is dropped below. 1872 */ 1873 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1874 dst_if = ifp; 1875 goto sendunicast; 1876 } 1877 1878 /* 1879 * If the packet is a multicast, or we don't know a better way to 1880 * get there, send to all interfaces. 1881 */ 1882 if (ETHER_IS_MULTICAST(eh->ether_dhost)) 1883 dst_if = NULL; 1884 else 1885 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan); 1886 if (dst_if == NULL) { 1887 struct bridge_iflist *bif; 1888 struct mbuf *mc; 1889 int error = 0, used = 0; 1890 1891 bridge_span(sc, m); 1892 1893 BRIDGE_LOCK2REF(sc, error); 1894 if (error) { 1895 m_freem(m); 1896 return (0); 1897 } 1898 1899 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1900 dst_if = bif->bif_ifp; 1901 1902 if (dst_if->if_type == IFT_GIF) 1903 continue; 1904 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 1905 continue; 1906 1907 /* 1908 * If this is not the original output interface, 1909 * and the interface is participating in spanning 1910 * tree, make sure the port is in a state that 1911 * allows forwarding. 1912 */ 1913 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) && 1914 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 1915 continue; 1916 1917 if (LIST_NEXT(bif, bif_next) == NULL) { 1918 used = 1; 1919 mc = m; 1920 } else { 1921 mc = m_copypacket(m, M_DONTWAIT); 1922 if (mc == NULL) { 1923 sc->sc_ifp->if_oerrors++; 1924 continue; 1925 } 1926 } 1927 1928 bridge_enqueue(sc, dst_if, mc); 1929 } 1930 if (used == 0) 1931 m_freem(m); 1932 BRIDGE_UNREF(sc); 1933 return (0); 1934 } 1935 1936 sendunicast: 1937 /* 1938 * XXX Spanning tree consideration here? 1939 */ 1940 1941 bridge_span(sc, m); 1942 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1943 m_freem(m); 1944 BRIDGE_UNLOCK(sc); 1945 return (0); 1946 } 1947 1948 BRIDGE_UNLOCK(sc); 1949 bridge_enqueue(sc, dst_if, m); 1950 return (0); 1951 } 1952 1953 /* 1954 * bridge_start: 1955 * 1956 * Start output on a bridge. 1957 * 1958 */ 1959 static void 1960 bridge_start(struct ifnet *ifp) 1961 { 1962 struct bridge_softc *sc; 1963 struct mbuf *m; 1964 struct ether_header *eh; 1965 struct ifnet *dst_if; 1966 1967 sc = ifp->if_softc; 1968 1969 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1970 for (;;) { 1971 IFQ_DEQUEUE(&ifp->if_snd, m); 1972 if (m == 0) 1973 break; 1974 ETHER_BPF_MTAP(ifp, m); 1975 1976 eh = mtod(m, struct ether_header *); 1977 dst_if = NULL; 1978 1979 BRIDGE_LOCK(sc); 1980 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 1981 dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1); 1982 } 1983 1984 if (dst_if == NULL) 1985 bridge_broadcast(sc, ifp, m, 0); 1986 else { 1987 BRIDGE_UNLOCK(sc); 1988 bridge_enqueue(sc, dst_if, m); 1989 } 1990 } 1991 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1992 } 1993 1994 /* 1995 * bridge_forward: 1996 * 1997 * The forwarding function of the bridge. 1998 * 1999 * NOTE: Releases the lock on return. 
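 *
 *	In outline: the source address is learned via bridge_rtupdate() if
 *	the receiving port is learning, frames arriving on ports in the
 *	STP discarding or learning states are dropped, the destination is
 *	looked up with bridge_rtlookup(), pfil(9) hooks are run when
 *	registered, and the frame is then either queued to the single
 *	destination port or handed to bridge_broadcast().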
2000 */ 2001 static void 2002 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, 2003 struct mbuf *m) 2004 { 2005 struct bridge_iflist *dbif; 2006 struct ifnet *src_if, *dst_if, *ifp; 2007 struct ether_header *eh; 2008 uint16_t vlan; 2009 uint8_t *dst; 2010 int error; 2011 2012 src_if = m->m_pkthdr.rcvif; 2013 ifp = sc->sc_ifp; 2014 2015 ifp->if_ipackets++; 2016 ifp->if_ibytes += m->m_pkthdr.len; 2017 vlan = VLANTAGOF(m); 2018 2019 if ((sbif->bif_flags & IFBIF_STP) && 2020 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2021 goto drop; 2022 2023 eh = mtod(m, struct ether_header *); 2024 dst = eh->ether_dhost; 2025 2026 /* If the interface is learning, record the address. */ 2027 if (sbif->bif_flags & IFBIF_LEARNING) { 2028 error = bridge_rtupdate(sc, eh->ether_shost, vlan, 2029 sbif, 0, IFBAF_DYNAMIC); 2030 /* 2031 * If the interface has addresses limits then deny any source 2032 * that is not in the cache. 2033 */ 2034 if (error && sbif->bif_addrmax) 2035 goto drop; 2036 } 2037 2038 if ((sbif->bif_flags & IFBIF_STP) != 0 && 2039 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) 2040 goto drop; 2041 2042 /* 2043 * At this point, the port either doesn't participate 2044 * in spanning tree or it is in the forwarding state. 2045 */ 2046 2047 /* 2048 * If the packet is unicast, destined for someone on 2049 * "this" side of the bridge, drop it. 2050 */ 2051 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 2052 dst_if = bridge_rtlookup(sc, dst, vlan); 2053 if (src_if == dst_if) 2054 goto drop; 2055 } else { 2056 /* 2057 * Check if its a reserved multicast address, any address 2058 * listed in 802.1D section 7.12.6 may not be forwarded by the 2059 * bridge. 2060 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F 2061 */ 2062 if (dst[0] == 0x01 && dst[1] == 0x80 && 2063 dst[2] == 0xc2 && dst[3] == 0x00 && 2064 dst[4] == 0x00 && dst[5] <= 0x0f) 2065 goto drop; 2066 2067 /* ...forward it to all interfaces. */ 2068 ifp->if_imcasts++; 2069 dst_if = NULL; 2070 } 2071 2072 /* 2073 * If we have a destination interface which is a member of our bridge, 2074 * OR this is a unicast packet, push it through the bpf(4) machinery. 2075 * For broadcast or multicast packets, don't bother because it will 2076 * be reinjected into ether_input. We do this before we pass the packets 2077 * through the pfil(9) framework, as it is possible that pfil(9) will 2078 * drop the packet, or possibly modify it, making it difficult to debug 2079 * firewall issues on the bridge. 2080 */ 2081 if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) 2082 ETHER_BPF_MTAP(ifp, m); 2083 2084 /* run the packet filter */ 2085 if (PFIL_HOOKED(&V_inet_pfil_hook) 2086 #ifdef INET6 2087 || PFIL_HOOKED(&V_inet6_pfil_hook) 2088 #endif 2089 ) { 2090 BRIDGE_UNLOCK(sc); 2091 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0) 2092 return; 2093 if (m == NULL) 2094 return; 2095 BRIDGE_LOCK(sc); 2096 } 2097 2098 if (dst_if == NULL) { 2099 bridge_broadcast(sc, src_if, m, 1); 2100 return; 2101 } 2102 2103 /* 2104 * At this point, we're dealing with a unicast frame 2105 * going to a different interface. 2106 */ 2107 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2108 goto drop; 2109 2110 dbif = bridge_lookup_member_if(sc, dst_if); 2111 if (dbif == NULL) 2112 /* Not a member of the bridge (anymore?) 
*/ 2113 goto drop; 2114 2115 /* Private segments can not talk to each other */ 2116 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE) 2117 goto drop; 2118 2119 if ((dbif->bif_flags & IFBIF_STP) && 2120 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2121 goto drop; 2122 2123 BRIDGE_UNLOCK(sc); 2124 2125 if (PFIL_HOOKED(&V_inet_pfil_hook) 2126 #ifdef INET6 2127 || PFIL_HOOKED(&V_inet6_pfil_hook) 2128 #endif 2129 ) { 2130 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0) 2131 return; 2132 if (m == NULL) 2133 return; 2134 } 2135 2136 bridge_enqueue(sc, dst_if, m); 2137 return; 2138 2139 drop: 2140 BRIDGE_UNLOCK(sc); 2141 m_freem(m); 2142 } 2143 2144 #if defined(INET) || defined(INET6) 2145 int (*carp_forus_p)(struct carp_if *, u_char *); 2146 #endif 2147 2148 /* 2149 * bridge_input: 2150 * 2151 * Receive input from a member interface. Queue the packet for 2152 * bridging if it is not for us. 2153 */ 2154 static struct mbuf * 2155 bridge_input(struct ifnet *ifp, struct mbuf *m) 2156 { 2157 struct bridge_softc *sc = ifp->if_bridge; 2158 struct bridge_iflist *bif, *bif2; 2159 struct ifnet *bifp; 2160 struct ether_header *eh; 2161 struct mbuf *mc, *mc2; 2162 uint16_t vlan; 2163 int error; 2164 2165 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2166 return (m); 2167 2168 bifp = sc->sc_ifp; 2169 vlan = VLANTAGOF(m); 2170 2171 /* 2172 * Implement support for bridge monitoring. If this flag has been 2173 * set on this interface, discard the packet once we push it through 2174 * the bpf(4) machinery, but before we do, increment the byte and 2175 * packet counters associated with this interface. 2176 */ 2177 if ((bifp->if_flags & IFF_MONITOR) != 0) { 2178 m->m_pkthdr.rcvif = bifp; 2179 ETHER_BPF_MTAP(bifp, m); 2180 bifp->if_ipackets++; 2181 bifp->if_ibytes += m->m_pkthdr.len; 2182 m_freem(m); 2183 return (NULL); 2184 } 2185 BRIDGE_LOCK(sc); 2186 bif = bridge_lookup_member_if(sc, ifp); 2187 if (bif == NULL) { 2188 BRIDGE_UNLOCK(sc); 2189 return (m); 2190 } 2191 2192 eh = mtod(m, struct ether_header *); 2193 2194 bridge_span(sc, m); 2195 2196 if (m->m_flags & (M_BCAST|M_MCAST)) { 2197 /* Tap off 802.1D packets; they do not get forwarded. */ 2198 if (memcmp(eh->ether_dhost, bstp_etheraddr, 2199 ETHER_ADDR_LEN) == 0) { 2200 m = bstp_input(&bif->bif_stp, ifp, m); 2201 if (m == NULL) { 2202 BRIDGE_UNLOCK(sc); 2203 return (NULL); 2204 } 2205 } 2206 2207 if ((bif->bif_flags & IFBIF_STP) && 2208 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2209 BRIDGE_UNLOCK(sc); 2210 return (m); 2211 } 2212 2213 /* 2214 * Make a deep copy of the packet and enqueue the copy 2215 * for bridge processing; return the original packet for 2216 * local processing. 2217 */ 2218 mc = m_dup(m, M_DONTWAIT); 2219 if (mc == NULL) { 2220 BRIDGE_UNLOCK(sc); 2221 return (m); 2222 } 2223 2224 /* Perform the bridge forwarding function with the copy. */ 2225 bridge_forward(sc, bif, mc); 2226 2227 /* 2228 * Reinject the mbuf as arriving on the bridge so we have a 2229 * chance at claiming multicast packets. We can not loop back 2230 * here from ether_input as a bridge is never a member of a 2231 * bridge. 
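 * The original mbuf is still returned to ether_input() at the end of
 * this block, so the frame also gets the usual local processing on
 * the receiving member interface.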
2232 */ 2233 KASSERT(bifp->if_bridge == NULL, 2234 ("loop created in bridge_input")); 2235 mc2 = m_dup(m, M_DONTWAIT); 2236 if (mc2 != NULL) { 2237 /* Keep the layer3 header aligned */ 2238 int i = min(mc2->m_pkthdr.len, max_protohdr); 2239 mc2 = m_copyup(mc2, i, ETHER_ALIGN); 2240 } 2241 if (mc2 != NULL) { 2242 mc2->m_pkthdr.rcvif = bifp; 2243 (*bifp->if_input)(bifp, mc2); 2244 } 2245 2246 /* Return the original packet for local processing. */ 2247 return (m); 2248 } 2249 2250 if ((bif->bif_flags & IFBIF_STP) && 2251 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2252 BRIDGE_UNLOCK(sc); 2253 return (m); 2254 } 2255 2256 #if (defined(INET) || defined(INET6)) 2257 # define OR_CARP_CHECK_WE_ARE_DST(iface) \ 2258 || ((iface)->if_carp \ 2259 && (*carp_forus_p)((iface)->if_carp, eh->ether_dhost)) 2260 # define OR_CARP_CHECK_WE_ARE_SRC(iface) \ 2261 || ((iface)->if_carp \ 2262 && (*carp_forus_p)((iface)->if_carp, eh->ether_shost)) 2263 #else 2264 # define OR_CARP_CHECK_WE_ARE_DST(iface) 2265 # define OR_CARP_CHECK_WE_ARE_SRC(iface) 2266 #endif 2267 2268 #ifdef INET6 2269 # define OR_PFIL_HOOKED_INET6 \ 2270 || PFIL_HOOKED(&V_inet6_pfil_hook) 2271 #else 2272 # define OR_PFIL_HOOKED_INET6 2273 #endif 2274 2275 #define GRAB_OUR_PACKETS(iface) \ 2276 if ((iface)->if_type == IFT_GIF) \ 2277 continue; \ 2278 /* It is destined for us. */ \ 2279 if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \ 2280 OR_CARP_CHECK_WE_ARE_DST((iface)) \ 2281 ) { \ 2282 if ((iface)->if_type == IFT_BRIDGE) { \ 2283 ETHER_BPF_MTAP(iface, m); \ 2284 iface->if_ipackets++; \ 2285 /* Filter on the physical interface. */ \ 2286 if (pfil_local_phys && \ 2287 (PFIL_HOOKED(&V_inet_pfil_hook) \ 2288 OR_PFIL_HOOKED_INET6)) { \ 2289 if (bridge_pfil(&m, NULL, ifp, \ 2290 PFIL_IN) != 0 || m == NULL) { \ 2291 BRIDGE_UNLOCK(sc); \ 2292 return (NULL); \ 2293 } \ 2294 } \ 2295 } \ 2296 if (bif->bif_flags & IFBIF_LEARNING) { \ 2297 error = bridge_rtupdate(sc, eh->ether_shost, \ 2298 vlan, bif, 0, IFBAF_DYNAMIC); \ 2299 if (error && bif->bif_addrmax) { \ 2300 BRIDGE_UNLOCK(sc); \ 2301 m_freem(m); \ 2302 return (NULL); \ 2303 } \ 2304 } \ 2305 m->m_pkthdr.rcvif = iface; \ 2306 BRIDGE_UNLOCK(sc); \ 2307 return (m); \ 2308 } \ 2309 \ 2310 /* We just received a packet that we sent out. */ \ 2311 if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \ 2312 OR_CARP_CHECK_WE_ARE_SRC((iface)) \ 2313 ) { \ 2314 BRIDGE_UNLOCK(sc); \ 2315 m_freem(m); \ 2316 return (NULL); \ 2317 } 2318 2319 /* 2320 * Unicast. Make sure it's not for the bridge. 2321 */ 2322 do { GRAB_OUR_PACKETS(bifp) } while (0); 2323 2324 /* 2325 * Give a chance for ifp at first priority. This will help when the 2326 * packet comes through the interface like VLAN's with the same MACs 2327 * on several interfaces from the same bridge. This also will save 2328 * some CPU cycles in case the destination interface and the input 2329 * interface (eq ifp) are the same. 2330 */ 2331 do { GRAB_OUR_PACKETS(ifp) } while (0); 2332 2333 /* Now check the all bridge members. */ 2334 LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) { 2335 GRAB_OUR_PACKETS(bif2->bif_ifp) 2336 } 2337 2338 #undef OR_CARP_CHECK_WE_ARE_DST 2339 #undef OR_CARP_CHECK_WE_ARE_SRC 2340 #undef OR_PFIL_HOOKED_INET6 2341 #undef GRAB_OUR_PACKETS 2342 2343 /* Perform the bridge forwarding function. 
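 * bridge_forward() consumes the mbuf and releases the bridge lock,
 * so NULL is returned to ether_input() below.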
*/ 2344 bridge_forward(sc, bif, m); 2345 2346 return (NULL); 2347 } 2348 2349 /* 2350 * bridge_broadcast: 2351 * 2352 * Send a frame to all interfaces that are members of 2353 * the bridge, except for the one on which the packet 2354 * arrived. 2355 * 2356 * NOTE: Releases the lock on return. 2357 */ 2358 static void 2359 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, 2360 struct mbuf *m, int runfilt) 2361 { 2362 struct bridge_iflist *dbif, *sbif; 2363 struct mbuf *mc; 2364 struct ifnet *dst_if; 2365 int error = 0, used = 0, i; 2366 2367 sbif = bridge_lookup_member_if(sc, src_if); 2368 2369 BRIDGE_LOCK2REF(sc, error); 2370 if (error) { 2371 m_freem(m); 2372 return; 2373 } 2374 2375 /* Filter on the bridge interface before broadcasting */ 2376 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2377 #ifdef INET6 2378 || PFIL_HOOKED(&V_inet6_pfil_hook) 2379 #endif 2380 )) { 2381 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) 2382 goto out; 2383 if (m == NULL) 2384 goto out; 2385 } 2386 2387 LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) { 2388 dst_if = dbif->bif_ifp; 2389 if (dst_if == src_if) 2390 continue; 2391 2392 /* Private segments can not talk to each other */ 2393 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)) 2394 continue; 2395 2396 if ((dbif->bif_flags & IFBIF_STP) && 2397 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2398 continue; 2399 2400 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 && 2401 (m->m_flags & (M_BCAST|M_MCAST)) == 0) 2402 continue; 2403 2404 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2405 continue; 2406 2407 if (LIST_NEXT(dbif, bif_next) == NULL) { 2408 mc = m; 2409 used = 1; 2410 } else { 2411 mc = m_dup(m, M_DONTWAIT); 2412 if (mc == NULL) { 2413 sc->sc_ifp->if_oerrors++; 2414 continue; 2415 } 2416 } 2417 2418 /* 2419 * Filter on the output interface. Pass a NULL bridge interface 2420 * pointer so we do not redundantly filter on the bridge for 2421 * each interface we broadcast on. 2422 */ 2423 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2424 #ifdef INET6 2425 || PFIL_HOOKED(&V_inet6_pfil_hook) 2426 #endif 2427 )) { 2428 if (used == 0) { 2429 /* Keep the layer3 header aligned */ 2430 i = min(mc->m_pkthdr.len, max_protohdr); 2431 mc = m_copyup(mc, i, ETHER_ALIGN); 2432 if (mc == NULL) { 2433 sc->sc_ifp->if_oerrors++; 2434 continue; 2435 } 2436 } 2437 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) 2438 continue; 2439 if (mc == NULL) 2440 continue; 2441 } 2442 2443 bridge_enqueue(sc, dst_if, mc); 2444 } 2445 if (used == 0) 2446 m_freem(m); 2447 2448 out: 2449 BRIDGE_UNREF(sc); 2450 } 2451 2452 /* 2453 * bridge_span: 2454 * 2455 * Duplicate a packet out one or more interfaces that are in span mode, 2456 * the original mbuf is unmodified. 2457 */ 2458 static void 2459 bridge_span(struct bridge_softc *sc, struct mbuf *m) 2460 { 2461 struct bridge_iflist *bif; 2462 struct ifnet *dst_if; 2463 struct mbuf *mc; 2464 2465 if (LIST_EMPTY(&sc->sc_spanlist)) 2466 return; 2467 2468 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 2469 dst_if = bif->bif_ifp; 2470 2471 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2472 continue; 2473 2474 mc = m_copypacket(m, M_DONTWAIT); 2475 if (mc == NULL) { 2476 sc->sc_ifp->if_oerrors++; 2477 continue; 2478 } 2479 2480 bridge_enqueue(sc, dst_if, mc); 2481 } 2482 } 2483 2484 /* 2485 * bridge_rtupdate: 2486 * 2487 * Add a bridge routing entry. 
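 *
 * Entries are keyed on (address, vlan); untagged and 802.1p-tagged
 * frames are filed under vlan 1.  Creating a new entry fails with
 * ENOSPC once the bridge-wide (sc_brtmax) or per-port (bif_addrmax)
 * limit is reached, and a dynamic update refreshes the entry's expiry
 * to time_uptime + sc_brttimeout.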
2488 */ 2489 static int 2490 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, 2491 struct bridge_iflist *bif, int setflags, uint8_t flags) 2492 { 2493 struct bridge_rtnode *brt; 2494 int error; 2495 2496 BRIDGE_LOCK_ASSERT(sc); 2497 2498 /* Check the source address is valid and not multicast. */ 2499 if (ETHER_IS_MULTICAST(dst) || 2500 (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 && 2501 dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) 2502 return (EINVAL); 2503 2504 /* 802.1p frames map to vlan 1 */ 2505 if (vlan == 0) 2506 vlan = 1; 2507 2508 /* 2509 * A route for this destination might already exist. If so, 2510 * update it, otherwise create a new one. 2511 */ 2512 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) { 2513 if (sc->sc_brtcnt >= sc->sc_brtmax) { 2514 sc->sc_brtexceeded++; 2515 return (ENOSPC); 2516 } 2517 /* Check per interface address limits (if enabled) */ 2518 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) { 2519 bif->bif_addrexceeded++; 2520 return (ENOSPC); 2521 } 2522 2523 /* 2524 * Allocate a new bridge forwarding node, and 2525 * initialize the expiration time and Ethernet 2526 * address. 2527 */ 2528 brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO); 2529 if (brt == NULL) 2530 return (ENOMEM); 2531 2532 if (bif->bif_flags & IFBIF_STICKY) 2533 brt->brt_flags = IFBAF_STICKY; 2534 else 2535 brt->brt_flags = IFBAF_DYNAMIC; 2536 2537 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN); 2538 brt->brt_vlan = vlan; 2539 2540 if ((error = bridge_rtnode_insert(sc, brt)) != 0) { 2541 uma_zfree(bridge_rtnode_zone, brt); 2542 return (error); 2543 } 2544 brt->brt_dst = bif; 2545 bif->bif_addrcnt++; 2546 } 2547 2548 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 2549 brt->brt_dst != bif) { 2550 brt->brt_dst->bif_addrcnt--; 2551 brt->brt_dst = bif; 2552 brt->brt_dst->bif_addrcnt++; 2553 } 2554 2555 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2556 brt->brt_expire = time_uptime + sc->sc_brttimeout; 2557 if (setflags) 2558 brt->brt_flags = flags; 2559 2560 return (0); 2561 } 2562 2563 /* 2564 * bridge_rtlookup: 2565 * 2566 * Lookup the destination interface for an address. 2567 */ 2568 static struct ifnet * 2569 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2570 { 2571 struct bridge_rtnode *brt; 2572 2573 BRIDGE_LOCK_ASSERT(sc); 2574 2575 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) 2576 return (NULL); 2577 2578 return (brt->brt_ifp); 2579 } 2580 2581 /* 2582 * bridge_rttrim: 2583 * 2584 * Trim the routine table so that we have a number 2585 * of routing entries less than or equal to the 2586 * maximum number. 2587 */ 2588 static void 2589 bridge_rttrim(struct bridge_softc *sc) 2590 { 2591 struct bridge_rtnode *brt, *nbrt; 2592 2593 BRIDGE_LOCK_ASSERT(sc); 2594 2595 /* Make sure we actually need to do this. */ 2596 if (sc->sc_brtcnt <= sc->sc_brtmax) 2597 return; 2598 2599 /* Force an aging cycle; this might trim enough addresses. */ 2600 bridge_rtage(sc); 2601 if (sc->sc_brtcnt <= sc->sc_brtmax) 2602 return; 2603 2604 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2605 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { 2606 bridge_rtnode_destroy(sc, brt); 2607 if (sc->sc_brtcnt <= sc->sc_brtmax) 2608 return; 2609 } 2610 } 2611 } 2612 2613 /* 2614 * bridge_timer: 2615 * 2616 * Aging timer for the bridge. 
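 *
 * Rearmed from a callout every bridge_rtable_prune_period seconds
 * while the bridge is running; each run lets bridge_rtage() reclaim
 * dynamic entries whose expiry time has passed.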
2617 */ 2618 static void 2619 bridge_timer(void *arg) 2620 { 2621 struct bridge_softc *sc = arg; 2622 2623 BRIDGE_LOCK_ASSERT(sc); 2624 2625 bridge_rtage(sc); 2626 2627 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) 2628 callout_reset(&sc->sc_brcallout, 2629 bridge_rtable_prune_period * hz, bridge_timer, sc); 2630 } 2631 2632 /* 2633 * bridge_rtage: 2634 * 2635 * Perform an aging cycle. 2636 */ 2637 static void 2638 bridge_rtage(struct bridge_softc *sc) 2639 { 2640 struct bridge_rtnode *brt, *nbrt; 2641 2642 BRIDGE_LOCK_ASSERT(sc); 2643 2644 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2645 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { 2646 if (time_uptime >= brt->brt_expire) 2647 bridge_rtnode_destroy(sc, brt); 2648 } 2649 } 2650 } 2651 2652 /* 2653 * bridge_rtflush: 2654 * 2655 * Remove all dynamic addresses from the bridge. 2656 */ 2657 static void 2658 bridge_rtflush(struct bridge_softc *sc, int full) 2659 { 2660 struct bridge_rtnode *brt, *nbrt; 2661 2662 BRIDGE_LOCK_ASSERT(sc); 2663 2664 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2665 if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2666 bridge_rtnode_destroy(sc, brt); 2667 } 2668 } 2669 2670 /* 2671 * bridge_rtdaddr: 2672 * 2673 * Remove an address from the table. 2674 */ 2675 static int 2676 bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2677 { 2678 struct bridge_rtnode *brt; 2679 int found = 0; 2680 2681 BRIDGE_LOCK_ASSERT(sc); 2682 2683 /* 2684 * If vlan is zero then we want to delete for all vlans so the lookup 2685 * may return more than one. 2686 */ 2687 while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) { 2688 bridge_rtnode_destroy(sc, brt); 2689 found = 1; 2690 } 2691 2692 return (found ? 0 : ENOENT); 2693 } 2694 2695 /* 2696 * bridge_rtdelete: 2697 * 2698 * Delete routes to a speicifc member interface. 2699 */ 2700 static void 2701 bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full) 2702 { 2703 struct bridge_rtnode *brt, *nbrt; 2704 2705 BRIDGE_LOCK_ASSERT(sc); 2706 2707 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2708 if (brt->brt_ifp == ifp && (full || 2709 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) 2710 bridge_rtnode_destroy(sc, brt); 2711 } 2712 } 2713 2714 /* 2715 * bridge_rtable_init: 2716 * 2717 * Initialize the route table for this bridge. 2718 */ 2719 static int 2720 bridge_rtable_init(struct bridge_softc *sc) 2721 { 2722 int i; 2723 2724 sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE, 2725 M_DEVBUF, M_NOWAIT); 2726 if (sc->sc_rthash == NULL) 2727 return (ENOMEM); 2728 2729 for (i = 0; i < BRIDGE_RTHASH_SIZE; i++) 2730 LIST_INIT(&sc->sc_rthash[i]); 2731 2732 sc->sc_rthash_key = arc4random(); 2733 2734 LIST_INIT(&sc->sc_rtlist); 2735 2736 return (0); 2737 } 2738 2739 /* 2740 * bridge_rtable_fini: 2741 * 2742 * Deconstruct the route table for this bridge. 2743 */ 2744 static void 2745 bridge_rtable_fini(struct bridge_softc *sc) 2746 { 2747 2748 KASSERT(sc->sc_brtcnt == 0, 2749 ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt)); 2750 free(sc->sc_rthash, M_DEVBUF); 2751 } 2752 2753 /* 2754 * The following hash function is adapted from "Hash Functions" by Bob Jenkins 2755 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). 
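 *
 * bridge_rthash() below folds the six bytes of the Ethernet address
 * together with a per-bridge random key (sc_rthash_key, seeded from
 * arc4random() when the route table is initialized) through one round
 * of mix(), then masks the result to select a bucket; roughly:
 *
 *	a = 0x9e3779b9 + addr[0..3];  b = 0x9e3779b9 + addr[4..5];
 *	c = sc_rthash_key;  mix(a, b, c);
 *	bucket = c & BRIDGE_RTHASH_MASK;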
2756 */ 2757 #define mix(a, b, c) \ 2758 do { \ 2759 a -= b; a -= c; a ^= (c >> 13); \ 2760 b -= c; b -= a; b ^= (a << 8); \ 2761 c -= a; c -= b; c ^= (b >> 13); \ 2762 a -= b; a -= c; a ^= (c >> 12); \ 2763 b -= c; b -= a; b ^= (a << 16); \ 2764 c -= a; c -= b; c ^= (b >> 5); \ 2765 a -= b; a -= c; a ^= (c >> 3); \ 2766 b -= c; b -= a; b ^= (a << 10); \ 2767 c -= a; c -= b; c ^= (b >> 15); \ 2768 } while (/*CONSTCOND*/0) 2769 2770 static __inline uint32_t 2771 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr) 2772 { 2773 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key; 2774 2775 b += addr[5] << 8; 2776 b += addr[4]; 2777 a += addr[3] << 24; 2778 a += addr[2] << 16; 2779 a += addr[1] << 8; 2780 a += addr[0]; 2781 2782 mix(a, b, c); 2783 2784 return (c & BRIDGE_RTHASH_MASK); 2785 } 2786 2787 #undef mix 2788 2789 static int 2790 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b) 2791 { 2792 int i, d; 2793 2794 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) { 2795 d = ((int)a[i]) - ((int)b[i]); 2796 } 2797 2798 return (d); 2799 } 2800 2801 /* 2802 * bridge_rtnode_lookup: 2803 * 2804 * Look up a bridge route node for the specified destination. Compare the 2805 * vlan id or if zero then just return the first match. 2806 */ 2807 static struct bridge_rtnode * 2808 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2809 { 2810 struct bridge_rtnode *brt; 2811 uint32_t hash; 2812 int dir; 2813 2814 BRIDGE_LOCK_ASSERT(sc); 2815 2816 hash = bridge_rthash(sc, addr); 2817 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) { 2818 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr); 2819 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) 2820 return (brt); 2821 if (dir > 0) 2822 return (NULL); 2823 } 2824 2825 return (NULL); 2826 } 2827 2828 /* 2829 * bridge_rtnode_insert: 2830 * 2831 * Insert the specified bridge node into the route table. We 2832 * assume the entry is not already in the table. 2833 */ 2834 static int 2835 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt) 2836 { 2837 struct bridge_rtnode *lbrt; 2838 uint32_t hash; 2839 int dir; 2840 2841 BRIDGE_LOCK_ASSERT(sc); 2842 2843 hash = bridge_rthash(sc, brt->brt_addr); 2844 2845 lbrt = LIST_FIRST(&sc->sc_rthash[hash]); 2846 if (lbrt == NULL) { 2847 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash); 2848 goto out; 2849 } 2850 2851 do { 2852 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr); 2853 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) 2854 return (EEXIST); 2855 if (dir > 0) { 2856 LIST_INSERT_BEFORE(lbrt, brt, brt_hash); 2857 goto out; 2858 } 2859 if (LIST_NEXT(lbrt, brt_hash) == NULL) { 2860 LIST_INSERT_AFTER(lbrt, brt, brt_hash); 2861 goto out; 2862 } 2863 lbrt = LIST_NEXT(lbrt, brt_hash); 2864 } while (lbrt != NULL); 2865 2866 #ifdef DIAGNOSTIC 2867 panic("bridge_rtnode_insert: impossible"); 2868 #endif 2869 2870 out: 2871 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list); 2872 sc->sc_brtcnt++; 2873 2874 return (0); 2875 } 2876 2877 /* 2878 * bridge_rtnode_destroy: 2879 * 2880 * Destroy a bridge rtnode. 2881 */ 2882 static void 2883 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt) 2884 { 2885 BRIDGE_LOCK_ASSERT(sc); 2886 2887 LIST_REMOVE(brt, brt_hash); 2888 2889 LIST_REMOVE(brt, brt_list); 2890 sc->sc_brtcnt--; 2891 brt->brt_dst->bif_addrcnt--; 2892 uma_zfree(bridge_rtnode_zone, brt); 2893 } 2894 2895 /* 2896 * bridge_rtable_expire: 2897 * 2898 * Set the expiry time for all routes on an interface. 
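 *
 * An age of zero flushes the dynamic routes learned on the interface
 * outright; otherwise the interface's dynamic entries are clamped so
 * that they expire no more than 'age' seconds from now.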
2899 */ 2900 static void 2901 bridge_rtable_expire(struct ifnet *ifp, int age) 2902 { 2903 struct bridge_softc *sc = ifp->if_bridge; 2904 struct bridge_rtnode *brt; 2905 2906 BRIDGE_LOCK(sc); 2907 2908 /* 2909 * If the age is zero then flush, otherwise set all the expiry times to 2910 * age for the interface 2911 */ 2912 if (age == 0) 2913 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN); 2914 else { 2915 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 2916 /* Cap the expiry time to 'age' */ 2917 if (brt->brt_ifp == ifp && 2918 brt->brt_expire > time_uptime + age && 2919 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2920 brt->brt_expire = time_uptime + age; 2921 } 2922 } 2923 BRIDGE_UNLOCK(sc); 2924 } 2925 2926 /* 2927 * bridge_state_change: 2928 * 2929 * Callback from the bridgestp code when a port changes states. 2930 */ 2931 static void 2932 bridge_state_change(struct ifnet *ifp, int state) 2933 { 2934 struct bridge_softc *sc = ifp->if_bridge; 2935 static const char *stpstates[] = { 2936 "disabled", 2937 "listening", 2938 "learning", 2939 "forwarding", 2940 "blocking", 2941 "discarding" 2942 }; 2943 2944 if (log_stp) 2945 log(LOG_NOTICE, "%s: state changed to %s on %s\n", 2946 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname); 2947 } 2948 2949 /* 2950 * Send bridge packets through pfil if they are one of the types pfil can deal 2951 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without 2952 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for 2953 * that interface. 2954 */ 2955 static int 2956 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) 2957 { 2958 int snap, error, i, hlen; 2959 struct ether_header *eh1, eh2; 2960 struct ip_fw_args args; 2961 struct ip *ip; 2962 struct llc llc1; 2963 u_int16_t ether_type; 2964 2965 snap = 0; 2966 error = -1; /* Default error if not error == 0 */ 2967 2968 #if 0 2969 /* we may return with the IP fields swapped, ensure its not shared */ 2970 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__)); 2971 #endif 2972 2973 if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) 2974 return (0); /* filtering is disabled */ 2975 2976 i = min((*mp)->m_pkthdr.len, max_protohdr); 2977 if ((*mp)->m_len < i) { 2978 *mp = m_pullup(*mp, i); 2979 if (*mp == NULL) { 2980 printf("%s: m_pullup failed\n", __func__); 2981 return (-1); 2982 } 2983 } 2984 2985 eh1 = mtod(*mp, struct ether_header *); 2986 ether_type = ntohs(eh1->ether_type); 2987 2988 /* 2989 * Check for SNAP/LLC. 2990 */ 2991 if (ether_type < ETHERMTU) { 2992 struct llc *llc2 = (struct llc *)(eh1 + 1); 2993 2994 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 && 2995 llc2->llc_dsap == LLC_SNAP_LSAP && 2996 llc2->llc_ssap == LLC_SNAP_LSAP && 2997 llc2->llc_control == LLC_UI) { 2998 ether_type = htons(llc2->llc_un.type_snap.ether_type); 2999 snap = 1; 3000 } 3001 } 3002 3003 /* 3004 * If we're trying to filter bridge traffic, don't look at anything 3005 * other than IP and ARP traffic. If the filter doesn't understand 3006 * IPv6, don't allow IPv6 through the bridge either. This is lame 3007 * since if we really wanted, say, an AppleTalk filter, we are hosed, 3008 * but of course we don't have an AppleTalk filter to begin with. 3009 * (Note that since pfil doesn't understand ARP it will pass *ALL* 3010 * ARP traffic.) 
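 * Anything else is dropped below unless pfil_onlyip has been turned
 * off, in which case it is passed along unfiltered.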
3011 */ 3012 switch (ether_type) { 3013 case ETHERTYPE_ARP: 3014 case ETHERTYPE_REVARP: 3015 if (pfil_ipfw_arp == 0) 3016 return (0); /* Automatically pass */ 3017 break; 3018 3019 case ETHERTYPE_IP: 3020 #ifdef INET6 3021 case ETHERTYPE_IPV6: 3022 #endif /* INET6 */ 3023 break; 3024 default: 3025 /* 3026 * Check to see if the user wants to pass non-ip 3027 * packets, these will not be checked by pfil(9) and 3028 * passed unconditionally so the default is to drop. 3029 */ 3030 if (pfil_onlyip) 3031 goto bad; 3032 } 3033 3034 /* Strip off the Ethernet header and keep a copy. */ 3035 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2); 3036 m_adj(*mp, ETHER_HDR_LEN); 3037 3038 /* Strip off snap header, if present */ 3039 if (snap) { 3040 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1); 3041 m_adj(*mp, sizeof(struct llc)); 3042 } 3043 3044 /* 3045 * Check the IP header for alignment and errors 3046 */ 3047 if (dir == PFIL_IN) { 3048 switch (ether_type) { 3049 case ETHERTYPE_IP: 3050 error = bridge_ip_checkbasic(mp); 3051 break; 3052 #ifdef INET6 3053 case ETHERTYPE_IPV6: 3054 error = bridge_ip6_checkbasic(mp); 3055 break; 3056 #endif /* INET6 */ 3057 default: 3058 error = 0; 3059 } 3060 if (error) 3061 goto bad; 3062 } 3063 3064 /* XXX this section is also in if_ethersubr.c */ 3065 // XXX PFIL_OUT or DIR_OUT ? 3066 if (V_ip_fw_chk_ptr && pfil_ipfw != 0 && 3067 dir == PFIL_OUT && ifp != NULL) { 3068 struct m_tag *mtag; 3069 3070 error = -1; 3071 /* fetch the start point from existing tags, if any */ 3072 mtag = m_tag_locate(*mp, MTAG_IPFW_RULE, 0, NULL); 3073 if (mtag == NULL) { 3074 args.rule.slot = 0; 3075 } else { 3076 struct ipfw_rule_ref *r; 3077 3078 /* XXX can we free the tag after use ? */ 3079 mtag->m_tag_id = PACKET_TAG_NONE; 3080 r = (struct ipfw_rule_ref *)(mtag + 1); 3081 /* packet already partially processed ? */ 3082 if (r->info & IPFW_ONEPASS) 3083 goto ipfwpass; 3084 args.rule = *r; 3085 } 3086 3087 args.m = *mp; 3088 args.oif = ifp; 3089 args.next_hop = NULL; 3090 args.eh = &eh2; 3091 args.inp = NULL; /* used by ipfw uid/gid/jail rules */ 3092 i = V_ip_fw_chk_ptr(&args); 3093 *mp = args.m; 3094 3095 if (*mp == NULL) 3096 return (error); 3097 3098 if (ip_dn_io_ptr && (i == IP_FW_DUMMYNET)) { 3099 3100 /* put the Ethernet header back on */ 3101 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3102 if (*mp == NULL) 3103 return (error); 3104 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3105 3106 /* 3107 * Pass the pkt to dummynet, which consumes it. The 3108 * packet will return to us via bridge_dummynet(). 3109 */ 3110 args.oif = ifp; 3111 ip_dn_io_ptr(mp, DIR_FWD | PROTO_IFB, &args); 3112 return (error); 3113 } 3114 3115 if (i != IP_FW_PASS) /* drop */ 3116 goto bad; 3117 } 3118 3119 ipfwpass: 3120 error = 0; 3121 3122 /* 3123 * Run the packet through pfil 3124 */ 3125 switch (ether_type) { 3126 case ETHERTYPE_IP: 3127 /* 3128 * before calling the firewall, swap fields the same as 3129 * IP does. here we assume the header is contiguous 3130 */ 3131 ip = mtod(*mp, struct ip *); 3132 3133 ip->ip_len = ntohs(ip->ip_len); 3134 ip->ip_off = ntohs(ip->ip_off); 3135 3136 /* 3137 * Run pfil on the member interface and the bridge, both can 3138 * be skipped by clearing pfil_member or pfil_bridge. 
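 * The dir argument decides the interleaving: for PFIL_OUT the bridge
 * hook runs before the member hook, for PFIL_IN after it.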
3139 * 3140 * Keep the order: 3141 * in_if -> bridge_if -> out_if 3142 */ 3143 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3144 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3145 dir, NULL); 3146 3147 if (*mp == NULL || error != 0) /* filter may consume */ 3148 break; 3149 3150 if (pfil_member && ifp != NULL) 3151 error = pfil_run_hooks(&V_inet_pfil_hook, mp, ifp, 3152 dir, NULL); 3153 3154 if (*mp == NULL || error != 0) /* filter may consume */ 3155 break; 3156 3157 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3158 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3159 dir, NULL); 3160 3161 if (*mp == NULL || error != 0) /* filter may consume */ 3162 break; 3163 3164 /* check if we need to fragment the packet */ 3165 if (pfil_member && ifp != NULL && dir == PFIL_OUT) { 3166 i = (*mp)->m_pkthdr.len; 3167 if (i > ifp->if_mtu) { 3168 error = bridge_fragment(ifp, *mp, &eh2, snap, 3169 &llc1); 3170 return (error); 3171 } 3172 } 3173 3174 /* Recalculate the ip checksum and restore byte ordering */ 3175 ip = mtod(*mp, struct ip *); 3176 hlen = ip->ip_hl << 2; 3177 if (hlen < sizeof(struct ip)) 3178 goto bad; 3179 if (hlen > (*mp)->m_len) { 3180 if ((*mp = m_pullup(*mp, hlen)) == 0) 3181 goto bad; 3182 ip = mtod(*mp, struct ip *); 3183 if (ip == NULL) 3184 goto bad; 3185 } 3186 ip->ip_len = htons(ip->ip_len); 3187 ip->ip_off = htons(ip->ip_off); 3188 ip->ip_sum = 0; 3189 if (hlen == sizeof(struct ip)) 3190 ip->ip_sum = in_cksum_hdr(ip); 3191 else 3192 ip->ip_sum = in_cksum(*mp, hlen); 3193 3194 break; 3195 #ifdef INET6 3196 case ETHERTYPE_IPV6: 3197 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3198 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3199 dir, NULL); 3200 3201 if (*mp == NULL || error != 0) /* filter may consume */ 3202 break; 3203 3204 if (pfil_member && ifp != NULL) 3205 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, ifp, 3206 dir, NULL); 3207 3208 if (*mp == NULL || error != 0) /* filter may consume */ 3209 break; 3210 3211 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3212 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3213 dir, NULL); 3214 break; 3215 #endif 3216 default: 3217 error = 0; 3218 break; 3219 } 3220 3221 if (*mp == NULL) 3222 return (error); 3223 if (error != 0) 3224 goto bad; 3225 3226 error = -1; 3227 3228 /* 3229 * Finally, put everything back the way it was and return 3230 */ 3231 if (snap) { 3232 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT); 3233 if (*mp == NULL) 3234 return (error); 3235 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc)); 3236 } 3237 3238 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3239 if (*mp == NULL) 3240 return (error); 3241 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3242 3243 return (0); 3244 3245 bad: 3246 m_freem(*mp); 3247 *mp = NULL; 3248 return (error); 3249 } 3250 3251 /* 3252 * Perform basic checks on header size since 3253 * pfil assumes ip_input has already processed 3254 * it for it. Cut-and-pasted from ip_input.c. 3255 * Given how simple the IPv6 version is, 3256 * does the IPv4 version really need to be 3257 * this complicated? 3258 * 3259 * XXX Should we update ipstat here, or not? 3260 * XXX Right now we update ipstat but not 3261 * XXX csum_counter. 
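 *
 * The checks below cover the IP version, header length, header
 * checksum (honouring a CSUM_IP_CHECKED offload result) and that the
 * mbuf chain holds at least ip_len bytes.  *mp is written back even
 * on failure, since m_copyup()/m_pullup() may have replaced the chain.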
3262 */ 3263 static int 3264 bridge_ip_checkbasic(struct mbuf **mp) 3265 { 3266 struct mbuf *m = *mp; 3267 struct ip *ip; 3268 int len, hlen; 3269 u_short sum; 3270 3271 if (*mp == NULL) 3272 return (-1); 3273 3274 if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3275 if ((m = m_copyup(m, sizeof(struct ip), 3276 (max_linkhdr + 3) & ~3)) == NULL) { 3277 /* XXXJRT new stat, please */ 3278 KMOD_IPSTAT_INC(ips_toosmall); 3279 goto bad; 3280 } 3281 } else if (__predict_false(m->m_len < sizeof (struct ip))) { 3282 if ((m = m_pullup(m, sizeof (struct ip))) == NULL) { 3283 KMOD_IPSTAT_INC(ips_toosmall); 3284 goto bad; 3285 } 3286 } 3287 ip = mtod(m, struct ip *); 3288 if (ip == NULL) goto bad; 3289 3290 if (ip->ip_v != IPVERSION) { 3291 KMOD_IPSTAT_INC(ips_badvers); 3292 goto bad; 3293 } 3294 hlen = ip->ip_hl << 2; 3295 if (hlen < sizeof(struct ip)) { /* minimum header length */ 3296 KMOD_IPSTAT_INC(ips_badhlen); 3297 goto bad; 3298 } 3299 if (hlen > m->m_len) { 3300 if ((m = m_pullup(m, hlen)) == 0) { 3301 KMOD_IPSTAT_INC(ips_badhlen); 3302 goto bad; 3303 } 3304 ip = mtod(m, struct ip *); 3305 if (ip == NULL) goto bad; 3306 } 3307 3308 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { 3309 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); 3310 } else { 3311 if (hlen == sizeof(struct ip)) { 3312 sum = in_cksum_hdr(ip); 3313 } else { 3314 sum = in_cksum(m, hlen); 3315 } 3316 } 3317 if (sum) { 3318 KMOD_IPSTAT_INC(ips_badsum); 3319 goto bad; 3320 } 3321 3322 /* Retrieve the packet length. */ 3323 len = ntohs(ip->ip_len); 3324 3325 /* 3326 * Check for additional length bogosity 3327 */ 3328 if (len < hlen) { 3329 KMOD_IPSTAT_INC(ips_badlen); 3330 goto bad; 3331 } 3332 3333 /* 3334 * Check that the amount of data in the buffers 3335 * is as at least much as the IP header would have us expect. 3336 * Drop packet if shorter than we expect. 3337 */ 3338 if (m->m_pkthdr.len < len) { 3339 KMOD_IPSTAT_INC(ips_tooshort); 3340 goto bad; 3341 } 3342 3343 /* Checks out, proceed */ 3344 *mp = m; 3345 return (0); 3346 3347 bad: 3348 *mp = m; 3349 return (-1); 3350 } 3351 3352 #ifdef INET6 3353 /* 3354 * Same as above, but for IPv6. 3355 * Cut-and-pasted from ip6_input.c. 3356 * XXX Should we update ip6stat, or not? 3357 */ 3358 static int 3359 bridge_ip6_checkbasic(struct mbuf **mp) 3360 { 3361 struct mbuf *m = *mp; 3362 struct ip6_hdr *ip6; 3363 3364 /* 3365 * If the IPv6 header is not aligned, slurp it up into a new 3366 * mbuf with space for link headers, in the event we forward 3367 * it. Otherwise, if it is aligned, make sure the entire base 3368 * IPv6 header is in the first mbuf of the chain. 
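 * Beyond that alignment and length handling, only the IPv6 version
 * field is validated here; as in the IPv4 case, *mp is written back
 * even on failure.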
3369 */ 3370 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3371 struct ifnet *inifp = m->m_pkthdr.rcvif; 3372 if ((m = m_copyup(m, sizeof(struct ip6_hdr), 3373 (max_linkhdr + 3) & ~3)) == NULL) { 3374 /* XXXJRT new stat, please */ 3375 V_ip6stat.ip6s_toosmall++; 3376 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3377 goto bad; 3378 } 3379 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) { 3380 struct ifnet *inifp = m->m_pkthdr.rcvif; 3381 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { 3382 V_ip6stat.ip6s_toosmall++; 3383 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3384 goto bad; 3385 } 3386 } 3387 3388 ip6 = mtod(m, struct ip6_hdr *); 3389 3390 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { 3391 V_ip6stat.ip6s_badvers++; 3392 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); 3393 goto bad; 3394 } 3395 3396 /* Checks out, proceed */ 3397 *mp = m; 3398 return (0); 3399 3400 bad: 3401 *mp = m; 3402 return (-1); 3403 } 3404 #endif /* INET6 */ 3405 3406 /* 3407 * bridge_fragment: 3408 * 3409 * Return a fragmented mbuf chain. 3410 */ 3411 static int 3412 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh, 3413 int snap, struct llc *llc) 3414 { 3415 struct mbuf *m0; 3416 struct ip *ip; 3417 int error = -1; 3418 3419 if (m->m_len < sizeof(struct ip) && 3420 (m = m_pullup(m, sizeof(struct ip))) == NULL) 3421 goto out; 3422 ip = mtod(m, struct ip *); 3423 3424 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist, 3425 CSUM_DELAY_IP); 3426 if (error) 3427 goto out; 3428 3429 /* walk the chain and re-add the Ethernet header */ 3430 for (m0 = m; m0; m0 = m0->m_nextpkt) { 3431 if (error == 0) { 3432 if (snap) { 3433 M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT); 3434 if (m0 == NULL) { 3435 error = ENOBUFS; 3436 continue; 3437 } 3438 bcopy(llc, mtod(m0, caddr_t), 3439 sizeof(struct llc)); 3440 } 3441 M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT); 3442 if (m0 == NULL) { 3443 error = ENOBUFS; 3444 continue; 3445 } 3446 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN); 3447 } else 3448 m_freem(m); 3449 } 3450 3451 if (error == 0) 3452 KMOD_IPSTAT_INC(ips_fragmented); 3453 3454 return (error); 3455 3456 out: 3457 if (m != NULL) 3458 m_freem(m); 3459 return (error); 3460 } 3461