1 /* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38 /* 39 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net) 40 * All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 51 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 52 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 53 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 54 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 55 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 56 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 57 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 59 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 60 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 61 * POSSIBILITY OF SUCH DAMAGE. 62 * 63 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp 64 */ 65 66 /* 67 * Network interface bridge support. 68 * 69 * TODO: 70 * 71 * - Currently only supports Ethernet-like interfaces (Ethernet, 72 * 802.11, VLANs on Ethernet, etc.) 
Figure out a nice way
 * to bridge other types of interfaces (FDDI-FDDI, and maybe
 * consider heterogeneous bridges).
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/protosw.h>
#include <sys/systm.h>
#include <sys/jail.h>
#include <sys/time.h>
#include <sys/socket.h> /* for net/if.h */
#include <sys/sockio.h>
#include <sys/ctype.h>  /* string functions */
#include <sys/kernel.h>
#include <sys/random.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <vm/uma.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/pfil.h>
#include <net/vnet.h>

#include <netinet/in.h> /* for struct arpcom */
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#if defined(INET) || defined(INET6)
#include <netinet/ip_carp.h>
#endif
#include <machine/in_cksum.h>
#include <netinet/if_ether.h> /* for struct arpcom */
#include <net/bridgestp.h>
#include <net/if_bridgevar.h>
#include <net/if_llc.h>
#include <net/if_vlan_var.h>

#include <net/route.h>
#include <netinet/ip_fw.h>
#include <netinet/ipfw/ip_fw_private.h>

/*
 * Size of the route hash table.  Must be a power of two.
 */
#ifndef BRIDGE_RTHASH_SIZE
#define	BRIDGE_RTHASH_SIZE		1024
#endif

#define	BRIDGE_RTHASH_MASK		(BRIDGE_RTHASH_SIZE - 1)

/*
 * Maximum number of addresses to cache.
 */
#ifndef BRIDGE_RTABLE_MAX
#define	BRIDGE_RTABLE_MAX		100
#endif

/*
 * Timeout (in seconds) for entries learned dynamically.
 */
#ifndef BRIDGE_RTABLE_TIMEOUT
#define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
#endif

/*
 * Number of seconds between walks of the route list.
 */
#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
#define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
#endif

/*
 * List of capabilities to possibly mask on the member interface.
 */
#define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)

/*
 * List of capabilities to strip.
 */
#define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO

/*
 * Bridge interface list entry.
 */
struct bridge_iflist {
	LIST_ENTRY(bridge_iflist) bif_next;
	struct ifnet		*bif_ifp;	/* member if */
	struct bstp_port	bif_stp;	/* STP state */
	uint32_t		bif_flags;	/* member if flags */
	int			bif_savedcaps;	/* saved capabilities */
	uint32_t		bif_addrmax;	/* max # of addresses */
	uint32_t		bif_addrcnt;	/* cur. # of addresses */
	uint32_t		bif_addrexceeded;/* # of address violations */
};

/*
 * Bridge route node.
193 */ 194 struct bridge_rtnode { 195 LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */ 196 LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */ 197 struct bridge_iflist *brt_dst; /* destination if */ 198 unsigned long brt_expire; /* expiration time */ 199 uint8_t brt_flags; /* address flags */ 200 uint8_t brt_addr[ETHER_ADDR_LEN]; 201 uint16_t brt_vlan; /* vlan id */ 202 }; 203 #define brt_ifp brt_dst->bif_ifp 204 205 /* 206 * Software state for each bridge. 207 */ 208 struct bridge_softc { 209 struct ifnet *sc_ifp; /* make this an interface */ 210 LIST_ENTRY(bridge_softc) sc_list; 211 struct mtx sc_mtx; 212 struct cv sc_cv; 213 uint32_t sc_brtmax; /* max # of addresses */ 214 uint32_t sc_brtcnt; /* cur. # of addresses */ 215 uint32_t sc_brttimeout; /* rt timeout in seconds */ 216 struct callout sc_brcallout; /* bridge callout */ 217 uint32_t sc_iflist_ref; /* refcount for sc_iflist */ 218 uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */ 219 LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */ 220 LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */ 221 LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */ 222 uint32_t sc_rthash_key; /* key for hash */ 223 LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */ 224 struct bstp_state sc_stp; /* STP state */ 225 uint32_t sc_brtexceeded; /* # of cache drops */ 226 struct ifnet *sc_ifaddr; /* member mac copied from */ 227 u_char sc_defaddr[6]; /* Default MAC address */ 228 }; 229 230 static struct mtx bridge_list_mtx; 231 eventhandler_tag bridge_detach_cookie = NULL; 232 233 int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; 234 235 uma_zone_t bridge_rtnode_zone; 236 237 static int bridge_clone_create(struct if_clone *, int, caddr_t); 238 static void bridge_clone_destroy(struct ifnet *); 239 240 static int bridge_ioctl(struct ifnet *, u_long, caddr_t); 241 static void bridge_mutecaps(struct bridge_softc *); 242 static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *, 243 int); 244 static void bridge_ifdetach(void *arg __unused, struct ifnet *); 245 static void bridge_init(void *); 246 static void bridge_dummynet(struct mbuf *, struct ifnet *); 247 static void bridge_stop(struct ifnet *, int); 248 static void bridge_start(struct ifnet *); 249 static struct mbuf *bridge_input(struct ifnet *, struct mbuf *); 250 static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *, 251 struct rtentry *); 252 static void bridge_enqueue(struct bridge_softc *, struct ifnet *, 253 struct mbuf *); 254 static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int); 255 256 static void bridge_forward(struct bridge_softc *, struct bridge_iflist *, 257 struct mbuf *m); 258 259 static void bridge_timer(void *); 260 261 static void bridge_broadcast(struct bridge_softc *, struct ifnet *, 262 struct mbuf *, int); 263 static void bridge_span(struct bridge_softc *, struct mbuf *); 264 265 static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, 266 uint16_t, struct bridge_iflist *, int, uint8_t); 267 static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *, 268 uint16_t); 269 static void bridge_rttrim(struct bridge_softc *); 270 static void bridge_rtage(struct bridge_softc *); 271 static void bridge_rtflush(struct bridge_softc *, int); 272 static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *, 273 uint16_t); 274 275 static int bridge_rtable_init(struct bridge_softc *); 276 static void bridge_rtable_fini(struct 
bridge_softc *); 277 278 static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *); 279 static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *, 280 const uint8_t *, uint16_t); 281 static int bridge_rtnode_insert(struct bridge_softc *, 282 struct bridge_rtnode *); 283 static void bridge_rtnode_destroy(struct bridge_softc *, 284 struct bridge_rtnode *); 285 static void bridge_rtable_expire(struct ifnet *, int); 286 static void bridge_state_change(struct ifnet *, int); 287 288 static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *, 289 const char *name); 290 static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *, 291 struct ifnet *ifp); 292 static void bridge_delete_member(struct bridge_softc *, 293 struct bridge_iflist *, int); 294 static void bridge_delete_span(struct bridge_softc *, 295 struct bridge_iflist *); 296 297 static int bridge_ioctl_add(struct bridge_softc *, void *); 298 static int bridge_ioctl_del(struct bridge_softc *, void *); 299 static int bridge_ioctl_gifflags(struct bridge_softc *, void *); 300 static int bridge_ioctl_sifflags(struct bridge_softc *, void *); 301 static int bridge_ioctl_scache(struct bridge_softc *, void *); 302 static int bridge_ioctl_gcache(struct bridge_softc *, void *); 303 static int bridge_ioctl_gifs(struct bridge_softc *, void *); 304 static int bridge_ioctl_rts(struct bridge_softc *, void *); 305 static int bridge_ioctl_saddr(struct bridge_softc *, void *); 306 static int bridge_ioctl_sto(struct bridge_softc *, void *); 307 static int bridge_ioctl_gto(struct bridge_softc *, void *); 308 static int bridge_ioctl_daddr(struct bridge_softc *, void *); 309 static int bridge_ioctl_flush(struct bridge_softc *, void *); 310 static int bridge_ioctl_gpri(struct bridge_softc *, void *); 311 static int bridge_ioctl_spri(struct bridge_softc *, void *); 312 static int bridge_ioctl_ght(struct bridge_softc *, void *); 313 static int bridge_ioctl_sht(struct bridge_softc *, void *); 314 static int bridge_ioctl_gfd(struct bridge_softc *, void *); 315 static int bridge_ioctl_sfd(struct bridge_softc *, void *); 316 static int bridge_ioctl_gma(struct bridge_softc *, void *); 317 static int bridge_ioctl_sma(struct bridge_softc *, void *); 318 static int bridge_ioctl_sifprio(struct bridge_softc *, void *); 319 static int bridge_ioctl_sifcost(struct bridge_softc *, void *); 320 static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *); 321 static int bridge_ioctl_addspan(struct bridge_softc *, void *); 322 static int bridge_ioctl_delspan(struct bridge_softc *, void *); 323 static int bridge_ioctl_gbparam(struct bridge_softc *, void *); 324 static int bridge_ioctl_grte(struct bridge_softc *, void *); 325 static int bridge_ioctl_gifsstp(struct bridge_softc *, void *); 326 static int bridge_ioctl_sproto(struct bridge_softc *, void *); 327 static int bridge_ioctl_stxhc(struct bridge_softc *, void *); 328 static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, 329 int); 330 static int bridge_ip_checkbasic(struct mbuf **mp); 331 #ifdef INET6 332 static int bridge_ip6_checkbasic(struct mbuf **mp); 333 #endif /* INET6 */ 334 static int bridge_fragment(struct ifnet *, struct mbuf *, 335 struct ether_header *, int, struct llc *); 336 337 /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */ 338 #define VLANTAGOF(_m) \ 339 (_m->m_flags & M_VLANTAG) ? 
EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : 1 340 341 static struct bstp_cb_ops bridge_ops = { 342 .bcb_state = bridge_state_change, 343 .bcb_rtage = bridge_rtable_expire 344 }; 345 346 SYSCTL_DECL(_net_link); 347 static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW, 0, "Bridge"); 348 349 static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */ 350 static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */ 351 static int pfil_member = 1; /* run pfil hooks on the member interface */ 352 static int pfil_ipfw = 0; /* layer2 filter with ipfw */ 353 static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */ 354 static int pfil_local_phys = 0; /* run pfil hooks on the physical interface for 355 locally destined packets */ 356 static int log_stp = 0; /* log STP state changes */ 357 static int bridge_inherit_mac = 0; /* share MAC with first bridge member */ 358 TUNABLE_INT("net.link.bridge.pfil_onlyip", &pfil_onlyip); 359 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW, 360 &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled"); 361 TUNABLE_INT("net.link.bridge.ipfw_arp", &pfil_ipfw_arp); 362 SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW, 363 &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2"); 364 TUNABLE_INT("net.link.bridge.pfil_bridge", &pfil_bridge); 365 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW, 366 &pfil_bridge, 0, "Packet filter on the bridge interface"); 367 TUNABLE_INT("net.link.bridge.pfil_member", &pfil_member); 368 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW, 369 &pfil_member, 0, "Packet filter on the member interface"); 370 TUNABLE_INT("net.link.bridge.pfil_local_phys", &pfil_local_phys); 371 SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, CTLFLAG_RW, 372 &pfil_local_phys, 0, 373 "Packet filter on the physical interface for locally destined packets"); 374 TUNABLE_INT("net.link.bridge.log_stp", &log_stp); 375 SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW, 376 &log_stp, 0, "Log STP state changes"); 377 TUNABLE_INT("net.link.bridge.inherit_mac", &bridge_inherit_mac); 378 SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, CTLFLAG_RW, 379 &bridge_inherit_mac, 0, 380 "Inherit MAC address from the first bridge member"); 381 382 struct bridge_control { 383 int (*bc_func)(struct bridge_softc *, void *); 384 int bc_argsize; 385 int bc_flags; 386 }; 387 388 #define BC_F_COPYIN 0x01 /* copy arguments in */ 389 #define BC_F_COPYOUT 0x02 /* copy arguments out */ 390 #define BC_F_SUSER 0x04 /* do super-user check */ 391 392 const struct bridge_control bridge_control_table[] = { 393 { bridge_ioctl_add, sizeof(struct ifbreq), 394 BC_F_COPYIN|BC_F_SUSER }, 395 { bridge_ioctl_del, sizeof(struct ifbreq), 396 BC_F_COPYIN|BC_F_SUSER }, 397 398 { bridge_ioctl_gifflags, sizeof(struct ifbreq), 399 BC_F_COPYIN|BC_F_COPYOUT }, 400 { bridge_ioctl_sifflags, sizeof(struct ifbreq), 401 BC_F_COPYIN|BC_F_SUSER }, 402 403 { bridge_ioctl_scache, sizeof(struct ifbrparam), 404 BC_F_COPYIN|BC_F_SUSER }, 405 { bridge_ioctl_gcache, sizeof(struct ifbrparam), 406 BC_F_COPYOUT }, 407 408 { bridge_ioctl_gifs, sizeof(struct ifbifconf), 409 BC_F_COPYIN|BC_F_COPYOUT }, 410 { bridge_ioctl_rts, sizeof(struct ifbaconf), 411 BC_F_COPYIN|BC_F_COPYOUT }, 412 413 { bridge_ioctl_saddr, sizeof(struct ifbareq), 414 BC_F_COPYIN|BC_F_SUSER }, 415 416 { bridge_ioctl_sto, sizeof(struct ifbrparam), 417 BC_F_COPYIN|BC_F_SUSER }, 418 { bridge_ioctl_gto, sizeof(struct ifbrparam), 419 BC_F_COPYOUT }, 
420 421 { bridge_ioctl_daddr, sizeof(struct ifbareq), 422 BC_F_COPYIN|BC_F_SUSER }, 423 424 { bridge_ioctl_flush, sizeof(struct ifbreq), 425 BC_F_COPYIN|BC_F_SUSER }, 426 427 { bridge_ioctl_gpri, sizeof(struct ifbrparam), 428 BC_F_COPYOUT }, 429 { bridge_ioctl_spri, sizeof(struct ifbrparam), 430 BC_F_COPYIN|BC_F_SUSER }, 431 432 { bridge_ioctl_ght, sizeof(struct ifbrparam), 433 BC_F_COPYOUT }, 434 { bridge_ioctl_sht, sizeof(struct ifbrparam), 435 BC_F_COPYIN|BC_F_SUSER }, 436 437 { bridge_ioctl_gfd, sizeof(struct ifbrparam), 438 BC_F_COPYOUT }, 439 { bridge_ioctl_sfd, sizeof(struct ifbrparam), 440 BC_F_COPYIN|BC_F_SUSER }, 441 442 { bridge_ioctl_gma, sizeof(struct ifbrparam), 443 BC_F_COPYOUT }, 444 { bridge_ioctl_sma, sizeof(struct ifbrparam), 445 BC_F_COPYIN|BC_F_SUSER }, 446 447 { bridge_ioctl_sifprio, sizeof(struct ifbreq), 448 BC_F_COPYIN|BC_F_SUSER }, 449 450 { bridge_ioctl_sifcost, sizeof(struct ifbreq), 451 BC_F_COPYIN|BC_F_SUSER }, 452 453 { bridge_ioctl_addspan, sizeof(struct ifbreq), 454 BC_F_COPYIN|BC_F_SUSER }, 455 { bridge_ioctl_delspan, sizeof(struct ifbreq), 456 BC_F_COPYIN|BC_F_SUSER }, 457 458 { bridge_ioctl_gbparam, sizeof(struct ifbropreq), 459 BC_F_COPYOUT }, 460 461 { bridge_ioctl_grte, sizeof(struct ifbrparam), 462 BC_F_COPYOUT }, 463 464 { bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf), 465 BC_F_COPYIN|BC_F_COPYOUT }, 466 467 { bridge_ioctl_sproto, sizeof(struct ifbrparam), 468 BC_F_COPYIN|BC_F_SUSER }, 469 470 { bridge_ioctl_stxhc, sizeof(struct ifbrparam), 471 BC_F_COPYIN|BC_F_SUSER }, 472 473 { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq), 474 BC_F_COPYIN|BC_F_SUSER }, 475 476 }; 477 const int bridge_control_table_size = 478 sizeof(bridge_control_table) / sizeof(bridge_control_table[0]); 479 480 LIST_HEAD(, bridge_softc) bridge_list; 481 482 IFC_SIMPLE_DECLARE(bridge, 0); 483 484 static int 485 bridge_modevent(module_t mod, int type, void *data) 486 { 487 488 switch (type) { 489 case MOD_LOAD: 490 mtx_init(&bridge_list_mtx, "if_bridge list", NULL, MTX_DEF); 491 if_clone_attach(&bridge_cloner); 492 bridge_rtnode_zone = uma_zcreate("bridge_rtnode", 493 sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL, 494 UMA_ALIGN_PTR, 0); 495 LIST_INIT(&bridge_list); 496 bridge_input_p = bridge_input; 497 bridge_output_p = bridge_output; 498 bridge_dn_p = bridge_dummynet; 499 bridge_detach_cookie = EVENTHANDLER_REGISTER( 500 ifnet_departure_event, bridge_ifdetach, NULL, 501 EVENTHANDLER_PRI_ANY); 502 break; 503 case MOD_UNLOAD: 504 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 505 bridge_detach_cookie); 506 if_clone_detach(&bridge_cloner); 507 uma_zdestroy(bridge_rtnode_zone); 508 bridge_input_p = NULL; 509 bridge_output_p = NULL; 510 bridge_dn_p = NULL; 511 mtx_destroy(&bridge_list_mtx); 512 break; 513 default: 514 return (EOPNOTSUPP); 515 } 516 return (0); 517 } 518 519 static moduledata_t bridge_mod = { 520 "if_bridge", 521 bridge_modevent, 522 0 523 }; 524 525 DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 526 MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1); 527 528 /* 529 * handler for net.link.bridge.pfil_ipfw 530 */ 531 static int 532 sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS) 533 { 534 int enable = pfil_ipfw; 535 int error; 536 537 error = sysctl_handle_int(oidp, &enable, 0, req); 538 enable = (enable) ? 1 : 0; 539 540 if (enable != pfil_ipfw) { 541 pfil_ipfw = enable; 542 543 /* 544 * Disable pfil so that ipfw doesnt run twice, if the user 545 * really wants both then they can re-enable pfil_bridge and/or 546 * pfil_member. 
Also allow non-ip packets as ipfw can filter by 547 * layer2 type. 548 */ 549 if (pfil_ipfw) { 550 pfil_onlyip = 0; 551 pfil_bridge = 0; 552 pfil_member = 0; 553 } 554 } 555 556 return (error); 557 } 558 SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW, 559 &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW"); 560 561 /* 562 * bridge_clone_create: 563 * 564 * Create a new bridge instance. 565 */ 566 static int 567 bridge_clone_create(struct if_clone *ifc, int unit, caddr_t params) 568 { 569 struct bridge_softc *sc, *sc2; 570 struct ifnet *bifp, *ifp; 571 int fb, retry; 572 unsigned long hostid; 573 574 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO); 575 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 576 if (ifp == NULL) { 577 free(sc, M_DEVBUF); 578 return (ENOSPC); 579 } 580 581 BRIDGE_LOCK_INIT(sc); 582 sc->sc_brtmax = BRIDGE_RTABLE_MAX; 583 sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT; 584 585 /* Initialize our routing table. */ 586 bridge_rtable_init(sc); 587 588 callout_init_mtx(&sc->sc_brcallout, &sc->sc_mtx, 0); 589 590 LIST_INIT(&sc->sc_iflist); 591 LIST_INIT(&sc->sc_spanlist); 592 593 ifp->if_softc = sc; 594 if_initname(ifp, ifc->ifc_name, unit); 595 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 596 ifp->if_ioctl = bridge_ioctl; 597 ifp->if_start = bridge_start; 598 ifp->if_init = bridge_init; 599 ifp->if_type = IFT_BRIDGE; 600 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 601 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 602 IFQ_SET_READY(&ifp->if_snd); 603 604 /* 605 * Generate an ethernet address with a locally administered address. 606 * 607 * Since we are using random ethernet addresses for the bridge, it is 608 * possible that we might have address collisions, so make sure that 609 * this hardware address isn't already in use on another bridge. 610 * The first try uses the hostid and falls back to arc4rand(). 611 */ 612 fb = 0; 613 getcredhostid(curthread->td_ucred, &hostid); 614 for (retry = 1; retry != 0;) { 615 if (fb || hostid == 0) { 616 arc4rand(sc->sc_defaddr, ETHER_ADDR_LEN, 1); 617 sc->sc_defaddr[0] &= ~1;/* clear multicast bit */ 618 sc->sc_defaddr[0] |= 2; /* set the LAA bit */ 619 } else { 620 sc->sc_defaddr[0] = 0x2; 621 sc->sc_defaddr[1] = (hostid >> 24) & 0xff; 622 sc->sc_defaddr[2] = (hostid >> 16) & 0xff; 623 sc->sc_defaddr[3] = (hostid >> 8 ) & 0xff; 624 sc->sc_defaddr[4] = hostid & 0xff; 625 sc->sc_defaddr[5] = ifp->if_dunit & 0xff; 626 } 627 628 fb = 1; 629 retry = 0; 630 mtx_lock(&bridge_list_mtx); 631 LIST_FOREACH(sc2, &bridge_list, sc_list) { 632 bifp = sc2->sc_ifp; 633 if (memcmp(sc->sc_defaddr, 634 IF_LLADDR(bifp), ETHER_ADDR_LEN) == 0) 635 retry = 1; 636 } 637 mtx_unlock(&bridge_list_mtx); 638 } 639 640 bstp_attach(&sc->sc_stp, &bridge_ops); 641 ether_ifattach(ifp, sc->sc_defaddr); 642 /* Now undo some of the damage... */ 643 ifp->if_baudrate = 0; 644 ifp->if_type = IFT_BRIDGE; 645 646 mtx_lock(&bridge_list_mtx); 647 LIST_INSERT_HEAD(&bridge_list, sc, sc_list); 648 mtx_unlock(&bridge_list_mtx); 649 650 return (0); 651 } 652 653 /* 654 * bridge_clone_destroy: 655 * 656 * Destroy a bridge instance. 
657 */ 658 static void 659 bridge_clone_destroy(struct ifnet *ifp) 660 { 661 struct bridge_softc *sc = ifp->if_softc; 662 struct bridge_iflist *bif; 663 664 BRIDGE_LOCK(sc); 665 666 bridge_stop(ifp, 1); 667 ifp->if_flags &= ~IFF_UP; 668 669 while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL) 670 bridge_delete_member(sc, bif, 0); 671 672 while ((bif = LIST_FIRST(&sc->sc_spanlist)) != NULL) { 673 bridge_delete_span(sc, bif); 674 } 675 676 BRIDGE_UNLOCK(sc); 677 678 callout_drain(&sc->sc_brcallout); 679 680 mtx_lock(&bridge_list_mtx); 681 LIST_REMOVE(sc, sc_list); 682 mtx_unlock(&bridge_list_mtx); 683 684 bstp_detach(&sc->sc_stp); 685 ether_ifdetach(ifp); 686 if_free(ifp); 687 688 /* Tear down the routing table. */ 689 bridge_rtable_fini(sc); 690 691 BRIDGE_LOCK_DESTROY(sc); 692 free(sc, M_DEVBUF); 693 } 694 695 /* 696 * bridge_ioctl: 697 * 698 * Handle a control request from the operator. 699 */ 700 static int 701 bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 702 { 703 struct bridge_softc *sc = ifp->if_softc; 704 struct ifreq *ifr = (struct ifreq *)data; 705 struct bridge_iflist *bif; 706 struct thread *td = curthread; 707 union { 708 struct ifbreq ifbreq; 709 struct ifbifconf ifbifconf; 710 struct ifbareq ifbareq; 711 struct ifbaconf ifbaconf; 712 struct ifbrparam ifbrparam; 713 struct ifbropreq ifbropreq; 714 } args; 715 struct ifdrv *ifd = (struct ifdrv *) data; 716 const struct bridge_control *bc; 717 int error = 0; 718 719 switch (cmd) { 720 721 case SIOCADDMULTI: 722 case SIOCDELMULTI: 723 break; 724 725 case SIOCGDRVSPEC: 726 case SIOCSDRVSPEC: 727 if (ifd->ifd_cmd >= bridge_control_table_size) { 728 error = EINVAL; 729 break; 730 } 731 bc = &bridge_control_table[ifd->ifd_cmd]; 732 733 if (cmd == SIOCGDRVSPEC && 734 (bc->bc_flags & BC_F_COPYOUT) == 0) { 735 error = EINVAL; 736 break; 737 } 738 else if (cmd == SIOCSDRVSPEC && 739 (bc->bc_flags & BC_F_COPYOUT) != 0) { 740 error = EINVAL; 741 break; 742 } 743 744 if (bc->bc_flags & BC_F_SUSER) { 745 error = priv_check(td, PRIV_NET_BRIDGE); 746 if (error) 747 break; 748 } 749 750 if (ifd->ifd_len != bc->bc_argsize || 751 ifd->ifd_len > sizeof(args)) { 752 error = EINVAL; 753 break; 754 } 755 756 bzero(&args, sizeof(args)); 757 if (bc->bc_flags & BC_F_COPYIN) { 758 error = copyin(ifd->ifd_data, &args, ifd->ifd_len); 759 if (error) 760 break; 761 } 762 763 BRIDGE_LOCK(sc); 764 error = (*bc->bc_func)(sc, &args); 765 BRIDGE_UNLOCK(sc); 766 if (error) 767 break; 768 769 if (bc->bc_flags & BC_F_COPYOUT) 770 error = copyout(&args, ifd->ifd_data, ifd->ifd_len); 771 772 break; 773 774 case SIOCSIFFLAGS: 775 if (!(ifp->if_flags & IFF_UP) && 776 (ifp->if_drv_flags & IFF_DRV_RUNNING)) { 777 /* 778 * If interface is marked down and it is running, 779 * then stop and disable it. 780 */ 781 BRIDGE_LOCK(sc); 782 bridge_stop(ifp, 1); 783 BRIDGE_UNLOCK(sc); 784 } else if ((ifp->if_flags & IFF_UP) && 785 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 786 /* 787 * If interface is marked up and it is stopped, then 788 * start it. 
789 */ 790 (*ifp->if_init)(sc); 791 } 792 break; 793 794 case SIOCSIFMTU: 795 if (ifr->ifr_mtu < 576) { 796 error = EINVAL; 797 break; 798 } 799 if (LIST_EMPTY(&sc->sc_iflist)) { 800 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 801 break; 802 } 803 BRIDGE_LOCK(sc); 804 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 805 if (bif->bif_ifp->if_mtu != ifr->ifr_mtu) { 806 log(LOG_NOTICE, "%s: invalid MTU: %lu(%s)" 807 " != %d\n", sc->sc_ifp->if_xname, 808 bif->bif_ifp->if_mtu, 809 bif->bif_ifp->if_xname, ifr->ifr_mtu); 810 error = EINVAL; 811 break; 812 } 813 } 814 if (!error) 815 sc->sc_ifp->if_mtu = ifr->ifr_mtu; 816 BRIDGE_UNLOCK(sc); 817 break; 818 default: 819 /* 820 * drop the lock as ether_ioctl() will call bridge_start() and 821 * cause the lock to be recursed. 822 */ 823 error = ether_ioctl(ifp, cmd, data); 824 break; 825 } 826 827 return (error); 828 } 829 830 /* 831 * bridge_mutecaps: 832 * 833 * Clear or restore unwanted capabilities on the member interface 834 */ 835 static void 836 bridge_mutecaps(struct bridge_softc *sc) 837 { 838 struct bridge_iflist *bif; 839 int enabled, mask; 840 841 /* Initial bitmask of capabilities to test */ 842 mask = BRIDGE_IFCAPS_MASK; 843 844 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 845 /* Every member must support it or its disabled */ 846 mask &= bif->bif_savedcaps; 847 } 848 849 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 850 enabled = bif->bif_ifp->if_capenable; 851 enabled &= ~BRIDGE_IFCAPS_STRIP; 852 /* strip off mask bits and enable them again if allowed */ 853 enabled &= ~BRIDGE_IFCAPS_MASK; 854 enabled |= mask; 855 bridge_set_ifcap(sc, bif, enabled); 856 } 857 858 } 859 860 static void 861 bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set) 862 { 863 struct ifnet *ifp = bif->bif_ifp; 864 struct ifreq ifr; 865 int error; 866 867 bzero(&ifr, sizeof(ifr)); 868 ifr.ifr_reqcap = set; 869 870 if (ifp->if_capenable != set) { 871 error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr); 872 if (error) 873 if_printf(sc->sc_ifp, 874 "error setting interface capabilities on %s\n", 875 ifp->if_xname); 876 } 877 } 878 879 /* 880 * bridge_lookup_member: 881 * 882 * Lookup a bridge member interface. 883 */ 884 static struct bridge_iflist * 885 bridge_lookup_member(struct bridge_softc *sc, const char *name) 886 { 887 struct bridge_iflist *bif; 888 struct ifnet *ifp; 889 890 BRIDGE_LOCK_ASSERT(sc); 891 892 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 893 ifp = bif->bif_ifp; 894 if (strcmp(ifp->if_xname, name) == 0) 895 return (bif); 896 } 897 898 return (NULL); 899 } 900 901 /* 902 * bridge_lookup_member_if: 903 * 904 * Lookup a bridge member interface by ifnet*. 905 */ 906 static struct bridge_iflist * 907 bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp) 908 { 909 struct bridge_iflist *bif; 910 911 BRIDGE_LOCK_ASSERT(sc); 912 913 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 914 if (bif->bif_ifp == member_ifp) 915 return (bif); 916 } 917 918 return (NULL); 919 } 920 921 /* 922 * bridge_delete_member: 923 * 924 * Delete the specified member interface. 
 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
    int gone)
{
	struct ifnet *ifs = bif->bif_ifp;
	struct ifnet *fif = NULL;

	BRIDGE_LOCK_ASSERT(sc);

	if (bif->bif_flags & IFBIF_STP)
		bstp_disable(&bif->bif_stp);

	ifs->if_bridge = NULL;
	BRIDGE_XLOCK(sc);
	LIST_REMOVE(bif, bif_next);
	BRIDGE_XDROP(sc);

	/*
	 * If removing the interface that gave the bridge its MAC address, set
	 * the MAC address of the bridge to the address of the next member, or
	 * to its default address if no members are left.
	 */
	if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
		if (LIST_EMPTY(&sc->sc_iflist)) {
			bcopy(sc->sc_defaddr,
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = NULL;
		} else {
			fif = LIST_FIRST(&sc->sc_iflist)->bif_ifp;
			bcopy(IF_LLADDR(fif),
			    IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
			sc->sc_ifaddr = fif;
		}
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	}

	bridge_mutecaps(sc);	/* recalculate now that this interface is removed */
	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
	KASSERT(bif->bif_addrcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));

	BRIDGE_UNLOCK(sc);
	if (!gone) {
		switch (ifs->if_type) {
		case IFT_ETHER:
		case IFT_L2VLAN:
			/*
			 * Take the interface out of promiscuous mode.
			 */
			(void) ifpromisc(ifs, 0);
			break;

		case IFT_GIF:
			break;

		default:
#ifdef DIAGNOSTIC
			panic("bridge_delete_member: impossible");
#endif
			break;
		}
		/* re-enable any interface capabilities */
		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
	}
	bstp_destroy(&bif->bif_stp);	/* prepare to free */
	BRIDGE_LOCK(sc);
	free(bif, M_DEVBUF);
}

/*
 * bridge_delete_span:
 *
 *	Delete the specified span interface.
 */
static void
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
{
	BRIDGE_LOCK_ASSERT(sc);

	KASSERT(bif->bif_ifp->if_bridge == NULL,
	    ("%s: not a span interface", __func__));

	LIST_REMOVE(bif, bif_next);
	free(bif, M_DEVBUF);
}

static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs;
	int error = 0;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);
	if (ifs->if_ioctl == NULL)	/* must be supported */
		return (EINVAL);

	/* If it's in the span list, it can't be a member.
*/ 1027 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1028 if (ifs == bif->bif_ifp) 1029 return (EBUSY); 1030 1031 if (ifs->if_bridge == sc) 1032 return (EEXIST); 1033 1034 if (ifs->if_bridge != NULL) 1035 return (EBUSY); 1036 1037 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1038 if (bif == NULL) 1039 return (ENOMEM); 1040 1041 bif->bif_ifp = ifs; 1042 bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER; 1043 bif->bif_savedcaps = ifs->if_capenable; 1044 1045 switch (ifs->if_type) { 1046 case IFT_ETHER: 1047 case IFT_L2VLAN: 1048 case IFT_GIF: 1049 /* permitted interface types */ 1050 break; 1051 default: 1052 error = EINVAL; 1053 goto out; 1054 } 1055 1056 /* Allow the first Ethernet member to define the MTU */ 1057 if (LIST_EMPTY(&sc->sc_iflist)) 1058 sc->sc_ifp->if_mtu = ifs->if_mtu; 1059 else if (sc->sc_ifp->if_mtu != ifs->if_mtu) { 1060 if_printf(sc->sc_ifp, "invalid MTU: %lu(%s) != %lu\n", 1061 ifs->if_mtu, ifs->if_xname, sc->sc_ifp->if_mtu); 1062 error = EINVAL; 1063 goto out; 1064 } 1065 1066 /* 1067 * Assign the interface's MAC address to the bridge if it's the first 1068 * member and the MAC address of the bridge has not been changed from 1069 * the default randomly generated one. 1070 */ 1071 if (bridge_inherit_mac && LIST_EMPTY(&sc->sc_iflist) && 1072 !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) { 1073 bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN); 1074 sc->sc_ifaddr = ifs; 1075 EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp); 1076 } 1077 1078 ifs->if_bridge = sc; 1079 bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp); 1080 /* 1081 * XXX: XLOCK HERE!?! 1082 * 1083 * NOTE: insert_***HEAD*** should be safe for the traversals. 1084 */ 1085 LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next); 1086 1087 /* Set interface capabilities to the intersection set of all members */ 1088 bridge_mutecaps(sc); 1089 1090 switch (ifs->if_type) { 1091 case IFT_ETHER: 1092 case IFT_L2VLAN: 1093 /* 1094 * Place the interface into promiscuous mode. 
1095 */ 1096 BRIDGE_UNLOCK(sc); 1097 error = ifpromisc(ifs, 1); 1098 BRIDGE_LOCK(sc); 1099 break; 1100 } 1101 if (error) 1102 bridge_delete_member(sc, bif, 0); 1103 out: 1104 if (error) { 1105 if (bif != NULL) 1106 free(bif, M_DEVBUF); 1107 } 1108 return (error); 1109 } 1110 1111 static int 1112 bridge_ioctl_del(struct bridge_softc *sc, void *arg) 1113 { 1114 struct ifbreq *req = arg; 1115 struct bridge_iflist *bif; 1116 1117 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1118 if (bif == NULL) 1119 return (ENOENT); 1120 1121 bridge_delete_member(sc, bif, 0); 1122 1123 return (0); 1124 } 1125 1126 static int 1127 bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) 1128 { 1129 struct ifbreq *req = arg; 1130 struct bridge_iflist *bif; 1131 struct bstp_port *bp; 1132 1133 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1134 if (bif == NULL) 1135 return (ENOENT); 1136 1137 bp = &bif->bif_stp; 1138 req->ifbr_ifsflags = bif->bif_flags; 1139 req->ifbr_state = bp->bp_state; 1140 req->ifbr_priority = bp->bp_priority; 1141 req->ifbr_path_cost = bp->bp_path_cost; 1142 req->ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1143 req->ifbr_proto = bp->bp_protover; 1144 req->ifbr_role = bp->bp_role; 1145 req->ifbr_stpflags = bp->bp_flags; 1146 req->ifbr_addrcnt = bif->bif_addrcnt; 1147 req->ifbr_addrmax = bif->bif_addrmax; 1148 req->ifbr_addrexceeded = bif->bif_addrexceeded; 1149 1150 /* Copy STP state options as flags */ 1151 if (bp->bp_operedge) 1152 req->ifbr_ifsflags |= IFBIF_BSTP_EDGE; 1153 if (bp->bp_flags & BSTP_PORT_AUTOEDGE) 1154 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE; 1155 if (bp->bp_ptp_link) 1156 req->ifbr_ifsflags |= IFBIF_BSTP_PTP; 1157 if (bp->bp_flags & BSTP_PORT_AUTOPTP) 1158 req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP; 1159 if (bp->bp_flags & BSTP_PORT_ADMEDGE) 1160 req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE; 1161 if (bp->bp_flags & BSTP_PORT_ADMCOST) 1162 req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST; 1163 return (0); 1164 } 1165 1166 static int 1167 bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg) 1168 { 1169 struct ifbreq *req = arg; 1170 struct bridge_iflist *bif; 1171 struct bstp_port *bp; 1172 int error; 1173 1174 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1175 if (bif == NULL) 1176 return (ENOENT); 1177 bp = &bif->bif_stp; 1178 1179 if (req->ifbr_ifsflags & IFBIF_SPAN) 1180 /* SPAN is readonly */ 1181 return (EINVAL); 1182 1183 if (req->ifbr_ifsflags & IFBIF_STP) { 1184 if ((bif->bif_flags & IFBIF_STP) == 0) { 1185 error = bstp_enable(&bif->bif_stp); 1186 if (error) 1187 return (error); 1188 } 1189 } else { 1190 if ((bif->bif_flags & IFBIF_STP) != 0) 1191 bstp_disable(&bif->bif_stp); 1192 } 1193 1194 /* Pass on STP flags */ 1195 bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0); 1196 bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0); 1197 bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0); 1198 bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 
1 : 0); 1199 1200 /* Save the bits relating to the bridge */ 1201 bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK; 1202 1203 return (0); 1204 } 1205 1206 static int 1207 bridge_ioctl_scache(struct bridge_softc *sc, void *arg) 1208 { 1209 struct ifbrparam *param = arg; 1210 1211 sc->sc_brtmax = param->ifbrp_csize; 1212 bridge_rttrim(sc); 1213 1214 return (0); 1215 } 1216 1217 static int 1218 bridge_ioctl_gcache(struct bridge_softc *sc, void *arg) 1219 { 1220 struct ifbrparam *param = arg; 1221 1222 param->ifbrp_csize = sc->sc_brtmax; 1223 1224 return (0); 1225 } 1226 1227 static int 1228 bridge_ioctl_gifs(struct bridge_softc *sc, void *arg) 1229 { 1230 struct ifbifconf *bifc = arg; 1231 struct bridge_iflist *bif; 1232 struct ifbreq breq; 1233 char *buf, *outbuf; 1234 int count, buflen, len, error = 0; 1235 1236 count = 0; 1237 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) 1238 count++; 1239 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1240 count++; 1241 1242 buflen = sizeof(breq) * count; 1243 if (bifc->ifbic_len == 0) { 1244 bifc->ifbic_len = buflen; 1245 return (0); 1246 } 1247 BRIDGE_UNLOCK(sc); 1248 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1249 BRIDGE_LOCK(sc); 1250 1251 count = 0; 1252 buf = outbuf; 1253 len = min(bifc->ifbic_len, buflen); 1254 bzero(&breq, sizeof(breq)); 1255 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1256 if (len < sizeof(breq)) 1257 break; 1258 1259 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1260 sizeof(breq.ifbr_ifsname)); 1261 /* Fill in the ifbreq structure */ 1262 error = bridge_ioctl_gifflags(sc, &breq); 1263 if (error) 1264 break; 1265 memcpy(buf, &breq, sizeof(breq)); 1266 count++; 1267 buf += sizeof(breq); 1268 len -= sizeof(breq); 1269 } 1270 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 1271 if (len < sizeof(breq)) 1272 break; 1273 1274 strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname, 1275 sizeof(breq.ifbr_ifsname)); 1276 breq.ifbr_ifsflags = bif->bif_flags; 1277 breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff; 1278 memcpy(buf, &breq, sizeof(breq)); 1279 count++; 1280 buf += sizeof(breq); 1281 len -= sizeof(breq); 1282 } 1283 1284 BRIDGE_UNLOCK(sc); 1285 bifc->ifbic_len = sizeof(breq) * count; 1286 error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); 1287 BRIDGE_LOCK(sc); 1288 free(outbuf, M_TEMP); 1289 return (error); 1290 } 1291 1292 static int 1293 bridge_ioctl_rts(struct bridge_softc *sc, void *arg) 1294 { 1295 struct ifbaconf *bac = arg; 1296 struct bridge_rtnode *brt; 1297 struct ifbareq bareq; 1298 char *buf, *outbuf; 1299 int count, buflen, len, error = 0; 1300 1301 if (bac->ifbac_len == 0) 1302 return (0); 1303 1304 count = 0; 1305 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) 1306 count++; 1307 buflen = sizeof(bareq) * count; 1308 1309 BRIDGE_UNLOCK(sc); 1310 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1311 BRIDGE_LOCK(sc); 1312 1313 count = 0; 1314 buf = outbuf; 1315 len = min(bac->ifbac_len, buflen); 1316 bzero(&bareq, sizeof(bareq)); 1317 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 1318 if (len < sizeof(bareq)) 1319 goto out; 1320 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname, 1321 sizeof(bareq.ifba_ifsname)); 1322 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); 1323 bareq.ifba_vlan = brt->brt_vlan; 1324 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 1325 time_uptime < brt->brt_expire) 1326 bareq.ifba_expire = brt->brt_expire - time_uptime; 1327 else 1328 bareq.ifba_expire = 0; 1329 bareq.ifba_flags = brt->brt_flags; 1330 1331 memcpy(buf, &bareq, sizeof(bareq)); 1332 
count++; 1333 buf += sizeof(bareq); 1334 len -= sizeof(bareq); 1335 } 1336 out: 1337 BRIDGE_UNLOCK(sc); 1338 bac->ifbac_len = sizeof(bareq) * count; 1339 error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); 1340 BRIDGE_LOCK(sc); 1341 free(outbuf, M_TEMP); 1342 return (error); 1343 } 1344 1345 static int 1346 bridge_ioctl_saddr(struct bridge_softc *sc, void *arg) 1347 { 1348 struct ifbareq *req = arg; 1349 struct bridge_iflist *bif; 1350 int error; 1351 1352 bif = bridge_lookup_member(sc, req->ifba_ifsname); 1353 if (bif == NULL) 1354 return (ENOENT); 1355 1356 error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1, 1357 req->ifba_flags); 1358 1359 return (error); 1360 } 1361 1362 static int 1363 bridge_ioctl_sto(struct bridge_softc *sc, void *arg) 1364 { 1365 struct ifbrparam *param = arg; 1366 1367 sc->sc_brttimeout = param->ifbrp_ctime; 1368 return (0); 1369 } 1370 1371 static int 1372 bridge_ioctl_gto(struct bridge_softc *sc, void *arg) 1373 { 1374 struct ifbrparam *param = arg; 1375 1376 param->ifbrp_ctime = sc->sc_brttimeout; 1377 return (0); 1378 } 1379 1380 static int 1381 bridge_ioctl_daddr(struct bridge_softc *sc, void *arg) 1382 { 1383 struct ifbareq *req = arg; 1384 1385 return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan)); 1386 } 1387 1388 static int 1389 bridge_ioctl_flush(struct bridge_softc *sc, void *arg) 1390 { 1391 struct ifbreq *req = arg; 1392 1393 bridge_rtflush(sc, req->ifbr_ifsflags); 1394 return (0); 1395 } 1396 1397 static int 1398 bridge_ioctl_gpri(struct bridge_softc *sc, void *arg) 1399 { 1400 struct ifbrparam *param = arg; 1401 struct bstp_state *bs = &sc->sc_stp; 1402 1403 param->ifbrp_prio = bs->bs_bridge_priority; 1404 return (0); 1405 } 1406 1407 static int 1408 bridge_ioctl_spri(struct bridge_softc *sc, void *arg) 1409 { 1410 struct ifbrparam *param = arg; 1411 1412 return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio)); 1413 } 1414 1415 static int 1416 bridge_ioctl_ght(struct bridge_softc *sc, void *arg) 1417 { 1418 struct ifbrparam *param = arg; 1419 struct bstp_state *bs = &sc->sc_stp; 1420 1421 param->ifbrp_hellotime = bs->bs_bridge_htime >> 8; 1422 return (0); 1423 } 1424 1425 static int 1426 bridge_ioctl_sht(struct bridge_softc *sc, void *arg) 1427 { 1428 struct ifbrparam *param = arg; 1429 1430 return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime)); 1431 } 1432 1433 static int 1434 bridge_ioctl_gfd(struct bridge_softc *sc, void *arg) 1435 { 1436 struct ifbrparam *param = arg; 1437 struct bstp_state *bs = &sc->sc_stp; 1438 1439 param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8; 1440 return (0); 1441 } 1442 1443 static int 1444 bridge_ioctl_sfd(struct bridge_softc *sc, void *arg) 1445 { 1446 struct ifbrparam *param = arg; 1447 1448 return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay)); 1449 } 1450 1451 static int 1452 bridge_ioctl_gma(struct bridge_softc *sc, void *arg) 1453 { 1454 struct ifbrparam *param = arg; 1455 struct bstp_state *bs = &sc->sc_stp; 1456 1457 param->ifbrp_maxage = bs->bs_bridge_max_age >> 8; 1458 return (0); 1459 } 1460 1461 static int 1462 bridge_ioctl_sma(struct bridge_softc *sc, void *arg) 1463 { 1464 struct ifbrparam *param = arg; 1465 1466 return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage)); 1467 } 1468 1469 static int 1470 bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg) 1471 { 1472 struct ifbreq *req = arg; 1473 struct bridge_iflist *bif; 1474 1475 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1476 if (bif == NULL) 1477 return (ENOENT); 1478 1479 return 
(bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority)); 1480 } 1481 1482 static int 1483 bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg) 1484 { 1485 struct ifbreq *req = arg; 1486 struct bridge_iflist *bif; 1487 1488 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1489 if (bif == NULL) 1490 return (ENOENT); 1491 1492 return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost)); 1493 } 1494 1495 static int 1496 bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg) 1497 { 1498 struct ifbreq *req = arg; 1499 struct bridge_iflist *bif; 1500 1501 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1502 if (bif == NULL) 1503 return (ENOENT); 1504 1505 bif->bif_addrmax = req->ifbr_addrmax; 1506 return (0); 1507 } 1508 1509 static int 1510 bridge_ioctl_addspan(struct bridge_softc *sc, void *arg) 1511 { 1512 struct ifbreq *req = arg; 1513 struct bridge_iflist *bif = NULL; 1514 struct ifnet *ifs; 1515 1516 ifs = ifunit(req->ifbr_ifsname); 1517 if (ifs == NULL) 1518 return (ENOENT); 1519 1520 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1521 if (ifs == bif->bif_ifp) 1522 return (EBUSY); 1523 1524 if (ifs->if_bridge != NULL) 1525 return (EBUSY); 1526 1527 switch (ifs->if_type) { 1528 case IFT_ETHER: 1529 case IFT_GIF: 1530 case IFT_L2VLAN: 1531 break; 1532 default: 1533 return (EINVAL); 1534 } 1535 1536 bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO); 1537 if (bif == NULL) 1538 return (ENOMEM); 1539 1540 bif->bif_ifp = ifs; 1541 bif->bif_flags = IFBIF_SPAN; 1542 1543 LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next); 1544 1545 return (0); 1546 } 1547 1548 static int 1549 bridge_ioctl_delspan(struct bridge_softc *sc, void *arg) 1550 { 1551 struct ifbreq *req = arg; 1552 struct bridge_iflist *bif; 1553 struct ifnet *ifs; 1554 1555 ifs = ifunit(req->ifbr_ifsname); 1556 if (ifs == NULL) 1557 return (ENOENT); 1558 1559 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1560 if (ifs == bif->bif_ifp) 1561 break; 1562 1563 if (bif == NULL) 1564 return (ENOENT); 1565 1566 bridge_delete_span(sc, bif); 1567 1568 return (0); 1569 } 1570 1571 static int 1572 bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg) 1573 { 1574 struct ifbropreq *req = arg; 1575 struct bstp_state *bs = &sc->sc_stp; 1576 struct bstp_port *root_port; 1577 1578 req->ifbop_maxage = bs->bs_bridge_max_age >> 8; 1579 req->ifbop_hellotime = bs->bs_bridge_htime >> 8; 1580 req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; 1581 1582 root_port = bs->bs_root_port; 1583 if (root_port == NULL) 1584 req->ifbop_root_port = 0; 1585 else 1586 req->ifbop_root_port = root_port->bp_ifp->if_index; 1587 1588 req->ifbop_holdcount = bs->bs_txholdcount; 1589 req->ifbop_priority = bs->bs_bridge_priority; 1590 req->ifbop_protocol = bs->bs_protover; 1591 req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; 1592 req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; 1593 req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; 1594 req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; 1595 req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; 1596 req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; 1597 1598 return (0); 1599 } 1600 1601 static int 1602 bridge_ioctl_grte(struct bridge_softc *sc, void *arg) 1603 { 1604 struct ifbrparam *param = arg; 1605 1606 param->ifbrp_cexceeded = sc->sc_brtexceeded; 1607 return (0); 1608 } 1609 1610 static int 1611 bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg) 1612 { 1613 struct ifbpstpconf *bifstp = arg; 1614 struct bridge_iflist *bif; 1615 struct bstp_port *bp; 1616 
struct ifbpstpreq bpreq; 1617 char *buf, *outbuf; 1618 int count, buflen, len, error = 0; 1619 1620 count = 0; 1621 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1622 if ((bif->bif_flags & IFBIF_STP) != 0) 1623 count++; 1624 } 1625 1626 buflen = sizeof(bpreq) * count; 1627 if (bifstp->ifbpstp_len == 0) { 1628 bifstp->ifbpstp_len = buflen; 1629 return (0); 1630 } 1631 1632 BRIDGE_UNLOCK(sc); 1633 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); 1634 BRIDGE_LOCK(sc); 1635 1636 count = 0; 1637 buf = outbuf; 1638 len = min(bifstp->ifbpstp_len, buflen); 1639 bzero(&bpreq, sizeof(bpreq)); 1640 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1641 if (len < sizeof(bpreq)) 1642 break; 1643 1644 if ((bif->bif_flags & IFBIF_STP) == 0) 1645 continue; 1646 1647 bp = &bif->bif_stp; 1648 bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; 1649 bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; 1650 bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; 1651 bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; 1652 bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; 1653 bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; 1654 1655 memcpy(buf, &bpreq, sizeof(bpreq)); 1656 count++; 1657 buf += sizeof(bpreq); 1658 len -= sizeof(bpreq); 1659 } 1660 1661 BRIDGE_UNLOCK(sc); 1662 bifstp->ifbpstp_len = sizeof(bpreq) * count; 1663 error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); 1664 BRIDGE_LOCK(sc); 1665 free(outbuf, M_TEMP); 1666 return (error); 1667 } 1668 1669 static int 1670 bridge_ioctl_sproto(struct bridge_softc *sc, void *arg) 1671 { 1672 struct ifbrparam *param = arg; 1673 1674 return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto)); 1675 } 1676 1677 static int 1678 bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg) 1679 { 1680 struct ifbrparam *param = arg; 1681 1682 return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc)); 1683 } 1684 1685 /* 1686 * bridge_ifdetach: 1687 * 1688 * Detach an interface from a bridge. Called when a member 1689 * interface is detaching. 1690 */ 1691 static void 1692 bridge_ifdetach(void *arg __unused, struct ifnet *ifp) 1693 { 1694 struct bridge_softc *sc = ifp->if_bridge; 1695 struct bridge_iflist *bif; 1696 1697 /* Check if the interface is a bridge member */ 1698 if (sc != NULL) { 1699 BRIDGE_LOCK(sc); 1700 1701 bif = bridge_lookup_member_if(sc, ifp); 1702 if (bif != NULL) 1703 bridge_delete_member(sc, bif, 1); 1704 1705 BRIDGE_UNLOCK(sc); 1706 return; 1707 } 1708 1709 /* Check if the interface is a span port */ 1710 mtx_lock(&bridge_list_mtx); 1711 LIST_FOREACH(sc, &bridge_list, sc_list) { 1712 BRIDGE_LOCK(sc); 1713 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) 1714 if (ifp == bif->bif_ifp) { 1715 bridge_delete_span(sc, bif); 1716 break; 1717 } 1718 1719 BRIDGE_UNLOCK(sc); 1720 } 1721 mtx_unlock(&bridge_list_mtx); 1722 } 1723 1724 /* 1725 * bridge_init: 1726 * 1727 * Initialize a bridge interface. 1728 */ 1729 static void 1730 bridge_init(void *xsc) 1731 { 1732 struct bridge_softc *sc = (struct bridge_softc *)xsc; 1733 struct ifnet *ifp = sc->sc_ifp; 1734 1735 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1736 return; 1737 1738 BRIDGE_LOCK(sc); 1739 callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz, 1740 bridge_timer, sc); 1741 1742 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1743 bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */ 1744 1745 BRIDGE_UNLOCK(sc); 1746 } 1747 1748 /* 1749 * bridge_stop: 1750 * 1751 * Stop the bridge interface. 
 */
static void
bridge_stop(struct ifnet *ifp, int disable)
{
	struct bridge_softc *sc = ifp->if_softc;

	BRIDGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	callout_stop(&sc->sc_brcallout);
	bstp_stop(&sc->sc_stp);

	bridge_rtflush(sc, IFBF_FLUSHDYN);

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
}

/*
 * bridge_enqueue:
 *
 *	Enqueue a packet on a bridge member interface.
 *
 */
static void
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
{
	int len, err = 0;
	short mflags;
	struct mbuf *m0;

	len = m->m_pkthdr.len;
	mflags = m->m_flags;

	/* We may be sending a fragment so traverse the mbuf */
	for (; m; m = m0) {
		m0 = m->m_nextpkt;
		m->m_nextpkt = NULL;

		/*
		 * If the underlying interface cannot do VLAN tag insertion
		 * itself then attach a packet tag that holds it.
		 */
		if ((m->m_flags & M_VLANTAG) &&
		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
			if (m == NULL) {
				if_printf(dst_ifp,
				    "unable to prepend VLAN header\n");
				dst_ifp->if_oerrors++;
				continue;
			}
			m->m_flags &= ~M_VLANTAG;
		}

		if (err == 0)
			dst_ifp->if_transmit(dst_ifp, m);
	}

	if (err == 0) {
		sc->sc_ifp->if_opackets++;
		sc->sc_ifp->if_obytes += len;
		if (mflags & M_MCAST)
			sc->sc_ifp->if_omcasts++;
	}
}

/*
 * bridge_dummynet:
 *
 *	Receive a queued packet from dummynet and pass it on to the output
 *	interface.
 *
 *	The mbuf has the Ethernet header already attached.
 */
static void
bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
{
	struct bridge_softc *sc;

	sc = ifp->if_bridge;

	/*
	 * The packet didn't originate from a member interface.  This should
	 * only ever happen if a member interface is removed while packets are
	 * queued for it.
	 */
	if (sc == NULL) {
		m_freem(m);
		return;
	}

	if (PFIL_HOOKED(&V_inet_pfil_hook)
#ifdef INET6
	    || PFIL_HOOKED(&V_inet6_pfil_hook)
#endif
	    ) {
		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
			return;
		if (m == NULL)
			return;
	}

	bridge_enqueue(sc, ifp, m);
}

/*
 * bridge_output:
 *
 *	Send output from a bridge member interface.  This
 *	performs the bridging function for locally originated
 *	packets.
 *
 *	The mbuf has the Ethernet header already attached.  We must
 *	enqueue or free the mbuf before returning.
 */
static int
bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
    struct rtentry *rt)
{
	struct ether_header *eh;
	struct ifnet *dst_if;
	struct bridge_softc *sc;
	uint16_t vlan;

	if (m->m_len < ETHER_HDR_LEN) {
		m = m_pullup(m, ETHER_HDR_LEN);
		if (m == NULL)
			return (0);
	}

	eh = mtod(m, struct ether_header *);
	sc = ifp->if_bridge;
	vlan = VLANTAGOF(m);

	BRIDGE_LOCK(sc);

	/*
	 * If the bridge is down, but the original output interface is up,
	 * go ahead and send out that interface.  Otherwise, the packet
	 * is dropped below.
1894 */ 1895 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1896 dst_if = ifp; 1897 goto sendunicast; 1898 } 1899 1900 /* 1901 * If the packet is a multicast, or we don't know a better way to 1902 * get there, send to all interfaces. 1903 */ 1904 if (ETHER_IS_MULTICAST(eh->ether_dhost)) 1905 dst_if = NULL; 1906 else 1907 dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan); 1908 if (dst_if == NULL) { 1909 struct bridge_iflist *bif; 1910 struct mbuf *mc; 1911 int error = 0, used = 0; 1912 1913 bridge_span(sc, m); 1914 1915 BRIDGE_LOCK2REF(sc, error); 1916 if (error) { 1917 m_freem(m); 1918 return (0); 1919 } 1920 1921 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1922 dst_if = bif->bif_ifp; 1923 1924 if (dst_if->if_type == IFT_GIF) 1925 continue; 1926 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 1927 continue; 1928 1929 /* 1930 * If this is not the original output interface, 1931 * and the interface is participating in spanning 1932 * tree, make sure the port is in a state that 1933 * allows forwarding. 1934 */ 1935 if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) && 1936 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 1937 continue; 1938 1939 if (LIST_NEXT(bif, bif_next) == NULL) { 1940 used = 1; 1941 mc = m; 1942 } else { 1943 mc = m_copypacket(m, M_DONTWAIT); 1944 if (mc == NULL) { 1945 sc->sc_ifp->if_oerrors++; 1946 continue; 1947 } 1948 } 1949 1950 bridge_enqueue(sc, dst_if, mc); 1951 } 1952 if (used == 0) 1953 m_freem(m); 1954 BRIDGE_UNREF(sc); 1955 return (0); 1956 } 1957 1958 sendunicast: 1959 /* 1960 * XXX Spanning tree consideration here? 1961 */ 1962 1963 bridge_span(sc, m); 1964 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1965 m_freem(m); 1966 BRIDGE_UNLOCK(sc); 1967 return (0); 1968 } 1969 1970 BRIDGE_UNLOCK(sc); 1971 bridge_enqueue(sc, dst_if, m); 1972 return (0); 1973 } 1974 1975 /* 1976 * bridge_start: 1977 * 1978 * Start output on a bridge. 1979 * 1980 */ 1981 static void 1982 bridge_start(struct ifnet *ifp) 1983 { 1984 struct bridge_softc *sc; 1985 struct mbuf *m; 1986 struct ether_header *eh; 1987 struct ifnet *dst_if; 1988 1989 sc = ifp->if_softc; 1990 1991 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1992 for (;;) { 1993 IFQ_DEQUEUE(&ifp->if_snd, m); 1994 if (m == 0) 1995 break; 1996 ETHER_BPF_MTAP(ifp, m); 1997 1998 eh = mtod(m, struct ether_header *); 1999 dst_if = NULL; 2000 2001 BRIDGE_LOCK(sc); 2002 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 2003 dst_if = bridge_rtlookup(sc, eh->ether_dhost, 1); 2004 } 2005 2006 if (dst_if == NULL) 2007 bridge_broadcast(sc, ifp, m, 0); 2008 else { 2009 BRIDGE_UNLOCK(sc); 2010 bridge_enqueue(sc, dst_if, m); 2011 } 2012 } 2013 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2014 } 2015 2016 /* 2017 * bridge_forward: 2018 * 2019 * The forwarding function of the bridge. 2020 * 2021 * NOTE: Releases the lock on return. 2022 */ 2023 static void 2024 bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, 2025 struct mbuf *m) 2026 { 2027 struct bridge_iflist *dbif; 2028 struct ifnet *src_if, *dst_if, *ifp; 2029 struct ether_header *eh; 2030 uint16_t vlan; 2031 uint8_t *dst; 2032 int error; 2033 2034 src_if = m->m_pkthdr.rcvif; 2035 ifp = sc->sc_ifp; 2036 2037 ifp->if_ipackets++; 2038 ifp->if_ibytes += m->m_pkthdr.len; 2039 vlan = VLANTAGOF(m); 2040 2041 if ((sbif->bif_flags & IFBIF_STP) && 2042 sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2043 goto drop; 2044 2045 eh = mtod(m, struct ether_header *); 2046 dst = eh->ether_dhost; 2047 2048 /* If the interface is learning, record the address. 
*/ 2049 if (sbif->bif_flags & IFBIF_LEARNING) { 2050 error = bridge_rtupdate(sc, eh->ether_shost, vlan, 2051 sbif, 0, IFBAF_DYNAMIC); 2052 /* 2053 * If the interface has addresses limits then deny any source 2054 * that is not in the cache. 2055 */ 2056 if (error && sbif->bif_addrmax) 2057 goto drop; 2058 } 2059 2060 if ((sbif->bif_flags & IFBIF_STP) != 0 && 2061 sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) 2062 goto drop; 2063 2064 /* 2065 * At this point, the port either doesn't participate 2066 * in spanning tree or it is in the forwarding state. 2067 */ 2068 2069 /* 2070 * If the packet is unicast, destined for someone on 2071 * "this" side of the bridge, drop it. 2072 */ 2073 if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) { 2074 dst_if = bridge_rtlookup(sc, dst, vlan); 2075 if (src_if == dst_if) 2076 goto drop; 2077 } else { 2078 /* 2079 * Check if its a reserved multicast address, any address 2080 * listed in 802.1D section 7.12.6 may not be forwarded by the 2081 * bridge. 2082 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F 2083 */ 2084 if (dst[0] == 0x01 && dst[1] == 0x80 && 2085 dst[2] == 0xc2 && dst[3] == 0x00 && 2086 dst[4] == 0x00 && dst[5] <= 0x0f) 2087 goto drop; 2088 2089 /* ...forward it to all interfaces. */ 2090 ifp->if_imcasts++; 2091 dst_if = NULL; 2092 } 2093 2094 /* 2095 * If we have a destination interface which is a member of our bridge, 2096 * OR this is a unicast packet, push it through the bpf(4) machinery. 2097 * For broadcast or multicast packets, don't bother because it will 2098 * be reinjected into ether_input. We do this before we pass the packets 2099 * through the pfil(9) framework, as it is possible that pfil(9) will 2100 * drop the packet, or possibly modify it, making it difficult to debug 2101 * firewall issues on the bridge. 2102 */ 2103 if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) 2104 ETHER_BPF_MTAP(ifp, m); 2105 2106 /* run the packet filter */ 2107 if (PFIL_HOOKED(&V_inet_pfil_hook) 2108 #ifdef INET6 2109 || PFIL_HOOKED(&V_inet6_pfil_hook) 2110 #endif 2111 ) { 2112 BRIDGE_UNLOCK(sc); 2113 if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0) 2114 return; 2115 if (m == NULL) 2116 return; 2117 BRIDGE_LOCK(sc); 2118 } 2119 2120 if (dst_if == NULL) { 2121 bridge_broadcast(sc, src_if, m, 1); 2122 return; 2123 } 2124 2125 /* 2126 * At this point, we're dealing with a unicast frame 2127 * going to a different interface. 2128 */ 2129 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2130 goto drop; 2131 2132 dbif = bridge_lookup_member_if(sc, dst_if); 2133 if (dbif == NULL) 2134 /* Not a member of the bridge (anymore?) */ 2135 goto drop; 2136 2137 /* Private segments can not talk to each other */ 2138 if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE) 2139 goto drop; 2140 2141 if ((dbif->bif_flags & IFBIF_STP) && 2142 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2143 goto drop; 2144 2145 BRIDGE_UNLOCK(sc); 2146 2147 if (PFIL_HOOKED(&V_inet_pfil_hook) 2148 #ifdef INET6 2149 || PFIL_HOOKED(&V_inet6_pfil_hook) 2150 #endif 2151 ) { 2152 if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0) 2153 return; 2154 if (m == NULL) 2155 return; 2156 } 2157 2158 bridge_enqueue(sc, dst_if, m); 2159 return; 2160 2161 drop: 2162 BRIDGE_UNLOCK(sc); 2163 m_freem(m); 2164 } 2165 2166 /* 2167 * bridge_input: 2168 * 2169 * Receive input from a member interface. Queue the packet for 2170 * bridging if it is not for us. 
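 *
 *	Returns the mbuf for further local processing by the caller, or
 *	NULL if the bridge consumed the packet (forwarded it, filtered it
 *	out, or freed it).  Roughly, the Ethernet input path uses the
 *	result like this (simplified sketch; the hook pointer is filled in
 *	when the bridge module is loaded):
 *
 *		m = (*bridge_input_p)(ifp, m);
 *		if (m == NULL)
 *			return;		(consumed by the bridge)
 *
 *	so a NULL return must mean the mbuf has already been freed or
 *	handed off.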
2171 */ 2172 static struct mbuf * 2173 bridge_input(struct ifnet *ifp, struct mbuf *m) 2174 { 2175 struct bridge_softc *sc = ifp->if_bridge; 2176 struct bridge_iflist *bif, *bif2; 2177 struct ifnet *bifp; 2178 struct ether_header *eh; 2179 struct mbuf *mc, *mc2; 2180 uint16_t vlan; 2181 int error; 2182 2183 if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2184 return (m); 2185 2186 bifp = sc->sc_ifp; 2187 vlan = VLANTAGOF(m); 2188 2189 /* 2190 * Implement support for bridge monitoring. If this flag has been 2191 * set on this interface, discard the packet once we push it through 2192 * the bpf(4) machinery, but before we do, increment the byte and 2193 * packet counters associated with this interface. 2194 */ 2195 if ((bifp->if_flags & IFF_MONITOR) != 0) { 2196 m->m_pkthdr.rcvif = bifp; 2197 ETHER_BPF_MTAP(bifp, m); 2198 bifp->if_ipackets++; 2199 bifp->if_ibytes += m->m_pkthdr.len; 2200 m_freem(m); 2201 return (NULL); 2202 } 2203 BRIDGE_LOCK(sc); 2204 bif = bridge_lookup_member_if(sc, ifp); 2205 if (bif == NULL) { 2206 BRIDGE_UNLOCK(sc); 2207 return (m); 2208 } 2209 2210 eh = mtod(m, struct ether_header *); 2211 2212 bridge_span(sc, m); 2213 2214 if (m->m_flags & (M_BCAST|M_MCAST)) { 2215 /* Tap off 802.1D packets; they do not get forwarded. */ 2216 if (memcmp(eh->ether_dhost, bstp_etheraddr, 2217 ETHER_ADDR_LEN) == 0) { 2218 m = bstp_input(&bif->bif_stp, ifp, m); 2219 if (m == NULL) { 2220 BRIDGE_UNLOCK(sc); 2221 return (NULL); 2222 } 2223 } 2224 2225 if ((bif->bif_flags & IFBIF_STP) && 2226 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2227 BRIDGE_UNLOCK(sc); 2228 return (m); 2229 } 2230 2231 /* 2232 * Make a deep copy of the packet and enqueue the copy 2233 * for bridge processing; return the original packet for 2234 * local processing. 2235 */ 2236 mc = m_dup(m, M_DONTWAIT); 2237 if (mc == NULL) { 2238 BRIDGE_UNLOCK(sc); 2239 return (m); 2240 } 2241 2242 /* Perform the bridge forwarding function with the copy. */ 2243 bridge_forward(sc, bif, mc); 2244 2245 /* 2246 * Reinject the mbuf as arriving on the bridge so we have a 2247 * chance at claiming multicast packets. We can not loop back 2248 * here from ether_input as a bridge is never a member of a 2249 * bridge. 2250 */ 2251 KASSERT(bifp->if_bridge == NULL, 2252 ("loop created in bridge_input")); 2253 mc2 = m_dup(m, M_DONTWAIT); 2254 if (mc2 != NULL) { 2255 /* Keep the layer3 header aligned */ 2256 int i = min(mc2->m_pkthdr.len, max_protohdr); 2257 mc2 = m_copyup(mc2, i, ETHER_ALIGN); 2258 } 2259 if (mc2 != NULL) { 2260 mc2->m_pkthdr.rcvif = bifp; 2261 (*bifp->if_input)(bifp, mc2); 2262 } 2263 2264 /* Return the original packet for local processing. */ 2265 return (m); 2266 } 2267 2268 if ((bif->bif_flags & IFBIF_STP) && 2269 bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { 2270 BRIDGE_UNLOCK(sc); 2271 return (m); 2272 } 2273 2274 #if (defined(INET) || defined(INET6)) 2275 # define OR_CARP_CHECK_WE_ARE_DST(iface) \ 2276 || ((iface)->if_carp \ 2277 && (*carp_forus_p)((iface), eh->ether_dhost)) 2278 # define OR_CARP_CHECK_WE_ARE_SRC(iface) \ 2279 || ((iface)->if_carp \ 2280 && (*carp_forus_p)((iface), eh->ether_shost)) 2281 #else 2282 # define OR_CARP_CHECK_WE_ARE_DST(iface) 2283 # define OR_CARP_CHECK_WE_ARE_SRC(iface) 2284 #endif 2285 2286 #ifdef INET6 2287 # define OR_PFIL_HOOKED_INET6 \ 2288 || PFIL_HOOKED(&V_inet6_pfil_hook) 2289 #else 2290 # define OR_PFIL_HOOKED_INET6 2291 #endif 2292 2293 #define GRAB_OUR_PACKETS(iface) \ 2294 if ((iface)->if_type == IFT_GIF) \ 2295 continue; \ 2296 /* It is destined for us. 
*/ \
	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, ETHER_ADDR_LEN) == 0 \
	    OR_CARP_CHECK_WE_ARE_DST((iface)) \
	    ) { \
		if ((iface)->if_type == IFT_BRIDGE) { \
			ETHER_BPF_MTAP(iface, m); \
			iface->if_ipackets++; \
			/* Filter on the physical interface. */ \
			if (pfil_local_phys && \
			    (PFIL_HOOKED(&V_inet_pfil_hook) \
			     OR_PFIL_HOOKED_INET6)) { \
				if (bridge_pfil(&m, NULL, ifp, \
				    PFIL_IN) != 0 || m == NULL) { \
					BRIDGE_UNLOCK(sc); \
					return (NULL); \
				} \
			} \
		} \
		if (bif->bif_flags & IFBIF_LEARNING) { \
			error = bridge_rtupdate(sc, eh->ether_shost, \
			    vlan, bif, 0, IFBAF_DYNAMIC); \
			if (error && bif->bif_addrmax) { \
				BRIDGE_UNLOCK(sc); \
				m_freem(m); \
				return (NULL); \
			} \
		} \
		m->m_pkthdr.rcvif = iface; \
		BRIDGE_UNLOCK(sc); \
		return (m); \
	} \
	\
	/* We just received a packet that we sent out. */ \
	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, ETHER_ADDR_LEN) == 0 \
	    OR_CARP_CHECK_WE_ARE_SRC((iface)) \
	    ) { \
		BRIDGE_UNLOCK(sc); \
		m_freem(m); \
		return (NULL); \
	}

	/*
	 * Unicast.  Make sure it's not for the bridge.
	 */
	do { GRAB_OUR_PACKETS(bifp) } while (0);

	/*
	 * Give ifp a chance at first priority.  This helps when the packet
	 * arrives through an interface, such as a VLAN, that shares its MAC
	 * address with other interfaces on the same bridge.  It also saves
	 * some CPU cycles when the destination interface and the input
	 * interface (i.e. ifp) are the same.
	 */
	do { GRAB_OUR_PACKETS(ifp) } while (0);

	/* Now check all the bridge members. */
	LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
		GRAB_OUR_PACKETS(bif2->bif_ifp)
	}

#undef OR_CARP_CHECK_WE_ARE_DST
#undef OR_CARP_CHECK_WE_ARE_SRC
#undef OR_PFIL_HOOKED_INET6
#undef GRAB_OUR_PACKETS

	/* Perform the bridge forwarding function. */
	bridge_forward(sc, bif, m);

	return (NULL);
}

/*
 * bridge_broadcast:
 *
 *	Send a frame to all interfaces that are members of
 *	the bridge, except for the one on which the packet
 *	arrived.
 *
 *	NOTE: Releases the lock on return.
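 *
 *	The caller must hold the bridge lock.  BRIDGE_LOCK2REF() exchanges
 *	it for a reference so the member list stays stable while we copy
 *	and transmit without the lock held, and BRIDGE_UNREF() drops that
 *	reference before returning.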
2375 */ 2376 static void 2377 bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, 2378 struct mbuf *m, int runfilt) 2379 { 2380 struct bridge_iflist *dbif, *sbif; 2381 struct mbuf *mc; 2382 struct ifnet *dst_if; 2383 int error = 0, used = 0, i; 2384 2385 sbif = bridge_lookup_member_if(sc, src_if); 2386 2387 BRIDGE_LOCK2REF(sc, error); 2388 if (error) { 2389 m_freem(m); 2390 return; 2391 } 2392 2393 /* Filter on the bridge interface before broadcasting */ 2394 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2395 #ifdef INET6 2396 || PFIL_HOOKED(&V_inet6_pfil_hook) 2397 #endif 2398 )) { 2399 if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) 2400 goto out; 2401 if (m == NULL) 2402 goto out; 2403 } 2404 2405 LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) { 2406 dst_if = dbif->bif_ifp; 2407 if (dst_if == src_if) 2408 continue; 2409 2410 /* Private segments can not talk to each other */ 2411 if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)) 2412 continue; 2413 2414 if ((dbif->bif_flags & IFBIF_STP) && 2415 dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) 2416 continue; 2417 2418 if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 && 2419 (m->m_flags & (M_BCAST|M_MCAST)) == 0) 2420 continue; 2421 2422 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2423 continue; 2424 2425 if (LIST_NEXT(dbif, bif_next) == NULL) { 2426 mc = m; 2427 used = 1; 2428 } else { 2429 mc = m_dup(m, M_DONTWAIT); 2430 if (mc == NULL) { 2431 sc->sc_ifp->if_oerrors++; 2432 continue; 2433 } 2434 } 2435 2436 /* 2437 * Filter on the output interface. Pass a NULL bridge interface 2438 * pointer so we do not redundantly filter on the bridge for 2439 * each interface we broadcast on. 2440 */ 2441 if (runfilt && (PFIL_HOOKED(&V_inet_pfil_hook) 2442 #ifdef INET6 2443 || PFIL_HOOKED(&V_inet6_pfil_hook) 2444 #endif 2445 )) { 2446 if (used == 0) { 2447 /* Keep the layer3 header aligned */ 2448 i = min(mc->m_pkthdr.len, max_protohdr); 2449 mc = m_copyup(mc, i, ETHER_ALIGN); 2450 if (mc == NULL) { 2451 sc->sc_ifp->if_oerrors++; 2452 continue; 2453 } 2454 } 2455 if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) 2456 continue; 2457 if (mc == NULL) 2458 continue; 2459 } 2460 2461 bridge_enqueue(sc, dst_if, mc); 2462 } 2463 if (used == 0) 2464 m_freem(m); 2465 2466 out: 2467 BRIDGE_UNREF(sc); 2468 } 2469 2470 /* 2471 * bridge_span: 2472 * 2473 * Duplicate a packet out one or more interfaces that are in span mode, 2474 * the original mbuf is unmodified. 2475 */ 2476 static void 2477 bridge_span(struct bridge_softc *sc, struct mbuf *m) 2478 { 2479 struct bridge_iflist *bif; 2480 struct ifnet *dst_if; 2481 struct mbuf *mc; 2482 2483 if (LIST_EMPTY(&sc->sc_spanlist)) 2484 return; 2485 2486 LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) { 2487 dst_if = bif->bif_ifp; 2488 2489 if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) 2490 continue; 2491 2492 mc = m_copypacket(m, M_DONTWAIT); 2493 if (mc == NULL) { 2494 sc->sc_ifp->if_oerrors++; 2495 continue; 2496 } 2497 2498 bridge_enqueue(sc, dst_if, mc); 2499 } 2500 } 2501 2502 /* 2503 * bridge_rtupdate: 2504 * 2505 * Add a bridge routing entry. 2506 */ 2507 static int 2508 bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, 2509 struct bridge_iflist *bif, int setflags, uint8_t flags) 2510 { 2511 struct bridge_rtnode *brt; 2512 int error; 2513 2514 BRIDGE_LOCK_ASSERT(sc); 2515 2516 /* Check the source address is valid and not multicast. 
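	 * (learning from a multicast or all-zero source would poison the
	 * forwarding table)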
*/ 2517 if (ETHER_IS_MULTICAST(dst) || 2518 (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 && 2519 dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) 2520 return (EINVAL); 2521 2522 /* 802.1p frames map to vlan 1 */ 2523 if (vlan == 0) 2524 vlan = 1; 2525 2526 /* 2527 * A route for this destination might already exist. If so, 2528 * update it, otherwise create a new one. 2529 */ 2530 if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) { 2531 if (sc->sc_brtcnt >= sc->sc_brtmax) { 2532 sc->sc_brtexceeded++; 2533 return (ENOSPC); 2534 } 2535 /* Check per interface address limits (if enabled) */ 2536 if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) { 2537 bif->bif_addrexceeded++; 2538 return (ENOSPC); 2539 } 2540 2541 /* 2542 * Allocate a new bridge forwarding node, and 2543 * initialize the expiration time and Ethernet 2544 * address. 2545 */ 2546 brt = uma_zalloc(bridge_rtnode_zone, M_NOWAIT | M_ZERO); 2547 if (brt == NULL) 2548 return (ENOMEM); 2549 2550 if (bif->bif_flags & IFBIF_STICKY) 2551 brt->brt_flags = IFBAF_STICKY; 2552 else 2553 brt->brt_flags = IFBAF_DYNAMIC; 2554 2555 memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN); 2556 brt->brt_vlan = vlan; 2557 2558 if ((error = bridge_rtnode_insert(sc, brt)) != 0) { 2559 uma_zfree(bridge_rtnode_zone, brt); 2560 return (error); 2561 } 2562 brt->brt_dst = bif; 2563 bif->bif_addrcnt++; 2564 } 2565 2566 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC && 2567 brt->brt_dst != bif) { 2568 brt->brt_dst->bif_addrcnt--; 2569 brt->brt_dst = bif; 2570 brt->brt_dst->bif_addrcnt++; 2571 } 2572 2573 if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2574 brt->brt_expire = time_uptime + sc->sc_brttimeout; 2575 if (setflags) 2576 brt->brt_flags = flags; 2577 2578 return (0); 2579 } 2580 2581 /* 2582 * bridge_rtlookup: 2583 * 2584 * Lookup the destination interface for an address. 2585 */ 2586 static struct ifnet * 2587 bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2588 { 2589 struct bridge_rtnode *brt; 2590 2591 BRIDGE_LOCK_ASSERT(sc); 2592 2593 if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) 2594 return (NULL); 2595 2596 return (brt->brt_ifp); 2597 } 2598 2599 /* 2600 * bridge_rttrim: 2601 * 2602 * Trim the routine table so that we have a number 2603 * of routing entries less than or equal to the 2604 * maximum number. 2605 */ 2606 static void 2607 bridge_rttrim(struct bridge_softc *sc) 2608 { 2609 struct bridge_rtnode *brt, *nbrt; 2610 2611 BRIDGE_LOCK_ASSERT(sc); 2612 2613 /* Make sure we actually need to do this. */ 2614 if (sc->sc_brtcnt <= sc->sc_brtmax) 2615 return; 2616 2617 /* Force an aging cycle; this might trim enough addresses. */ 2618 bridge_rtage(sc); 2619 if (sc->sc_brtcnt <= sc->sc_brtmax) 2620 return; 2621 2622 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2623 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { 2624 bridge_rtnode_destroy(sc, brt); 2625 if (sc->sc_brtcnt <= sc->sc_brtmax) 2626 return; 2627 } 2628 } 2629 } 2630 2631 /* 2632 * bridge_timer: 2633 * 2634 * Aging timer for the bridge. 2635 */ 2636 static void 2637 bridge_timer(void *arg) 2638 { 2639 struct bridge_softc *sc = arg; 2640 2641 BRIDGE_LOCK_ASSERT(sc); 2642 2643 bridge_rtage(sc); 2644 2645 if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) 2646 callout_reset(&sc->sc_brcallout, 2647 bridge_rtable_prune_period * hz, bridge_timer, sc); 2648 } 2649 2650 /* 2651 * bridge_rtage: 2652 * 2653 * Perform an aging cycle. 
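 *
 *	Called from the bridge_timer() callout and from bridge_rttrim().
 *	Dynamic entries whose brt_expire time has passed are removed;
 *	static and sticky entries are left untouched.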
 */
static void
bridge_rtage(struct bridge_softc *sc)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
			if (time_uptime >= brt->brt_expire)
				bridge_rtnode_destroy(sc, brt);
		}
	}
}

/*
 * bridge_rtflush:
 *
 *	Remove all dynamic addresses from the bridge.
 */
static void
bridge_rtflush(struct bridge_softc *sc, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
			bridge_rtnode_destroy(sc, brt);
	}
}

/*
 * bridge_rtdaddr:
 *
 *	Remove an address from the table.
 */
static int
bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
{
	struct bridge_rtnode *brt;
	int found = 0;

	BRIDGE_LOCK_ASSERT(sc);

	/*
	 * If vlan is zero then we want to delete for all vlans so the lookup
	 * may return more than one.
	 */
	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
		bridge_rtnode_destroy(sc, brt);
		found = 1;
	}

	return (found ? 0 : ENOENT);
}

/*
 * bridge_rtdelete:
 *
 *	Delete routes to a specific member interface.
 */
static void
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
{
	struct bridge_rtnode *brt, *nbrt;

	BRIDGE_LOCK_ASSERT(sc);

	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		if (brt->brt_ifp == ifp && (full ||
		    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
			bridge_rtnode_destroy(sc, brt);
	}
}

/*
 * bridge_rtable_init:
 *
 *	Initialize the route table for this bridge.
 */
static int
bridge_rtable_init(struct bridge_softc *sc)
{
	int i;

	sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_rthash == NULL)
		return (ENOMEM);

	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
		LIST_INIT(&sc->sc_rthash[i]);

	sc->sc_rthash_key = arc4random();

	LIST_INIT(&sc->sc_rtlist);

	return (0);
}

/*
 * bridge_rtable_fini:
 *
 *	Deconstruct the route table for this bridge.
 */
static void
bridge_rtable_fini(struct bridge_softc *sc)
{

	KASSERT(sc->sc_brtcnt == 0,
	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
	free(sc->sc_rthash, M_DEVBUF);
}

/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobb's Journal, September 1997).
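 *
 * bridge_rthash() below folds the six bytes of the MAC address into the
 * a and b words, seeds c with the per-bridge random key (sc_rthash_key),
 * runs one round of mix() and returns c masked with BRIDGE_RTHASH_MASK,
 * i.e. an index into the sc_rthash[] bucket array.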
2774 */ 2775 #define mix(a, b, c) \ 2776 do { \ 2777 a -= b; a -= c; a ^= (c >> 13); \ 2778 b -= c; b -= a; b ^= (a << 8); \ 2779 c -= a; c -= b; c ^= (b >> 13); \ 2780 a -= b; a -= c; a ^= (c >> 12); \ 2781 b -= c; b -= a; b ^= (a << 16); \ 2782 c -= a; c -= b; c ^= (b >> 5); \ 2783 a -= b; a -= c; a ^= (c >> 3); \ 2784 b -= c; b -= a; b ^= (a << 10); \ 2785 c -= a; c -= b; c ^= (b >> 15); \ 2786 } while (/*CONSTCOND*/0) 2787 2788 static __inline uint32_t 2789 bridge_rthash(struct bridge_softc *sc, const uint8_t *addr) 2790 { 2791 uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key; 2792 2793 b += addr[5] << 8; 2794 b += addr[4]; 2795 a += addr[3] << 24; 2796 a += addr[2] << 16; 2797 a += addr[1] << 8; 2798 a += addr[0]; 2799 2800 mix(a, b, c); 2801 2802 return (c & BRIDGE_RTHASH_MASK); 2803 } 2804 2805 #undef mix 2806 2807 static int 2808 bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b) 2809 { 2810 int i, d; 2811 2812 for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) { 2813 d = ((int)a[i]) - ((int)b[i]); 2814 } 2815 2816 return (d); 2817 } 2818 2819 /* 2820 * bridge_rtnode_lookup: 2821 * 2822 * Look up a bridge route node for the specified destination. Compare the 2823 * vlan id or if zero then just return the first match. 2824 */ 2825 static struct bridge_rtnode * 2826 bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) 2827 { 2828 struct bridge_rtnode *brt; 2829 uint32_t hash; 2830 int dir; 2831 2832 BRIDGE_LOCK_ASSERT(sc); 2833 2834 hash = bridge_rthash(sc, addr); 2835 LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) { 2836 dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr); 2837 if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) 2838 return (brt); 2839 if (dir > 0) 2840 return (NULL); 2841 } 2842 2843 return (NULL); 2844 } 2845 2846 /* 2847 * bridge_rtnode_insert: 2848 * 2849 * Insert the specified bridge node into the route table. We 2850 * assume the entry is not already in the table. 2851 */ 2852 static int 2853 bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt) 2854 { 2855 struct bridge_rtnode *lbrt; 2856 uint32_t hash; 2857 int dir; 2858 2859 BRIDGE_LOCK_ASSERT(sc); 2860 2861 hash = bridge_rthash(sc, brt->brt_addr); 2862 2863 lbrt = LIST_FIRST(&sc->sc_rthash[hash]); 2864 if (lbrt == NULL) { 2865 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash); 2866 goto out; 2867 } 2868 2869 do { 2870 dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr); 2871 if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) 2872 return (EEXIST); 2873 if (dir > 0) { 2874 LIST_INSERT_BEFORE(lbrt, brt, brt_hash); 2875 goto out; 2876 } 2877 if (LIST_NEXT(lbrt, brt_hash) == NULL) { 2878 LIST_INSERT_AFTER(lbrt, brt, brt_hash); 2879 goto out; 2880 } 2881 lbrt = LIST_NEXT(lbrt, brt_hash); 2882 } while (lbrt != NULL); 2883 2884 #ifdef DIAGNOSTIC 2885 panic("bridge_rtnode_insert: impossible"); 2886 #endif 2887 2888 out: 2889 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list); 2890 sc->sc_brtcnt++; 2891 2892 return (0); 2893 } 2894 2895 /* 2896 * bridge_rtnode_destroy: 2897 * 2898 * Destroy a bridge rtnode. 2899 */ 2900 static void 2901 bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt) 2902 { 2903 BRIDGE_LOCK_ASSERT(sc); 2904 2905 LIST_REMOVE(brt, brt_hash); 2906 2907 LIST_REMOVE(brt, brt_list); 2908 sc->sc_brtcnt--; 2909 brt->brt_dst->bif_addrcnt--; 2910 uma_zfree(bridge_rtnode_zone, brt); 2911 } 2912 2913 /* 2914 * bridge_rtable_expire: 2915 * 2916 * Set the expiry time for all routes on an interface. 
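 *
 *	An age of zero flushes all dynamic entries learned on the
 *	interface; otherwise the expiry of each dynamic entry on the
 *	interface is capped at time_uptime + age, so that stale addresses
 *	are relearned sooner (typically after a spanning tree topology
 *	change).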
2917 */ 2918 static void 2919 bridge_rtable_expire(struct ifnet *ifp, int age) 2920 { 2921 struct bridge_softc *sc = ifp->if_bridge; 2922 struct bridge_rtnode *brt; 2923 2924 BRIDGE_LOCK(sc); 2925 2926 /* 2927 * If the age is zero then flush, otherwise set all the expiry times to 2928 * age for the interface 2929 */ 2930 if (age == 0) 2931 bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN); 2932 else { 2933 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 2934 /* Cap the expiry time to 'age' */ 2935 if (brt->brt_ifp == ifp && 2936 brt->brt_expire > time_uptime + age && 2937 (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) 2938 brt->brt_expire = time_uptime + age; 2939 } 2940 } 2941 BRIDGE_UNLOCK(sc); 2942 } 2943 2944 /* 2945 * bridge_state_change: 2946 * 2947 * Callback from the bridgestp code when a port changes states. 2948 */ 2949 static void 2950 bridge_state_change(struct ifnet *ifp, int state) 2951 { 2952 struct bridge_softc *sc = ifp->if_bridge; 2953 static const char *stpstates[] = { 2954 "disabled", 2955 "listening", 2956 "learning", 2957 "forwarding", 2958 "blocking", 2959 "discarding" 2960 }; 2961 2962 if (log_stp) 2963 log(LOG_NOTICE, "%s: state changed to %s on %s\n", 2964 sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname); 2965 } 2966 2967 /* 2968 * Send bridge packets through pfil if they are one of the types pfil can deal 2969 * with, or if they are ARP or REVARP. (pfil will pass ARP and REVARP without 2970 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for 2971 * that interface. 2972 */ 2973 static int 2974 bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) 2975 { 2976 int snap, error, i, hlen; 2977 struct ether_header *eh1, eh2; 2978 struct ip_fw_args args; 2979 struct ip *ip; 2980 struct llc llc1; 2981 u_int16_t ether_type; 2982 2983 snap = 0; 2984 error = -1; /* Default error if not error == 0 */ 2985 2986 #if 0 2987 /* we may return with the IP fields swapped, ensure its not shared */ 2988 KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__)); 2989 #endif 2990 2991 if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) 2992 return (0); /* filtering is disabled */ 2993 2994 i = min((*mp)->m_pkthdr.len, max_protohdr); 2995 if ((*mp)->m_len < i) { 2996 *mp = m_pullup(*mp, i); 2997 if (*mp == NULL) { 2998 printf("%s: m_pullup failed\n", __func__); 2999 return (-1); 3000 } 3001 } 3002 3003 eh1 = mtod(*mp, struct ether_header *); 3004 ether_type = ntohs(eh1->ether_type); 3005 3006 /* 3007 * Check for SNAP/LLC. 3008 */ 3009 if (ether_type < ETHERMTU) { 3010 struct llc *llc2 = (struct llc *)(eh1 + 1); 3011 3012 if ((*mp)->m_len >= ETHER_HDR_LEN + 8 && 3013 llc2->llc_dsap == LLC_SNAP_LSAP && 3014 llc2->llc_ssap == LLC_SNAP_LSAP && 3015 llc2->llc_control == LLC_UI) { 3016 ether_type = htons(llc2->llc_un.type_snap.ether_type); 3017 snap = 1; 3018 } 3019 } 3020 3021 /* 3022 * If we're trying to filter bridge traffic, don't look at anything 3023 * other than IP and ARP traffic. If the filter doesn't understand 3024 * IPv6, don't allow IPv6 through the bridge either. This is lame 3025 * since if we really wanted, say, an AppleTalk filter, we are hosed, 3026 * but of course we don't have an AppleTalk filter to begin with. 3027 * (Note that since pfil doesn't understand ARP it will pass *ALL* 3028 * ARP traffic.) 
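	 * Non-IP frames are dropped below unless the pfil_onlyip knob has
	 * been cleared, in which case they are passed through unfiltered.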
3029 */ 3030 switch (ether_type) { 3031 case ETHERTYPE_ARP: 3032 case ETHERTYPE_REVARP: 3033 if (pfil_ipfw_arp == 0) 3034 return (0); /* Automatically pass */ 3035 break; 3036 3037 case ETHERTYPE_IP: 3038 #ifdef INET6 3039 case ETHERTYPE_IPV6: 3040 #endif /* INET6 */ 3041 break; 3042 default: 3043 /* 3044 * Check to see if the user wants to pass non-ip 3045 * packets, these will not be checked by pfil(9) and 3046 * passed unconditionally so the default is to drop. 3047 */ 3048 if (pfil_onlyip) 3049 goto bad; 3050 } 3051 3052 /* Strip off the Ethernet header and keep a copy. */ 3053 m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2); 3054 m_adj(*mp, ETHER_HDR_LEN); 3055 3056 /* Strip off snap header, if present */ 3057 if (snap) { 3058 m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1); 3059 m_adj(*mp, sizeof(struct llc)); 3060 } 3061 3062 /* 3063 * Check the IP header for alignment and errors 3064 */ 3065 if (dir == PFIL_IN) { 3066 switch (ether_type) { 3067 case ETHERTYPE_IP: 3068 error = bridge_ip_checkbasic(mp); 3069 break; 3070 #ifdef INET6 3071 case ETHERTYPE_IPV6: 3072 error = bridge_ip6_checkbasic(mp); 3073 break; 3074 #endif /* INET6 */ 3075 default: 3076 error = 0; 3077 } 3078 if (error) 3079 goto bad; 3080 } 3081 3082 /* XXX this section is also in if_ethersubr.c */ 3083 // XXX PFIL_OUT or DIR_OUT ? 3084 if (V_ip_fw_chk_ptr && pfil_ipfw != 0 && 3085 dir == PFIL_OUT && ifp != NULL) { 3086 struct m_tag *mtag; 3087 3088 error = -1; 3089 /* fetch the start point from existing tags, if any */ 3090 mtag = m_tag_locate(*mp, MTAG_IPFW_RULE, 0, NULL); 3091 if (mtag == NULL) { 3092 args.rule.slot = 0; 3093 } else { 3094 struct ipfw_rule_ref *r; 3095 3096 /* XXX can we free the tag after use ? */ 3097 mtag->m_tag_id = PACKET_TAG_NONE; 3098 r = (struct ipfw_rule_ref *)(mtag + 1); 3099 /* packet already partially processed ? */ 3100 if (r->info & IPFW_ONEPASS) 3101 goto ipfwpass; 3102 args.rule = *r; 3103 } 3104 3105 args.m = *mp; 3106 args.oif = ifp; 3107 args.next_hop = NULL; 3108 args.next_hop6 = NULL; 3109 args.eh = &eh2; 3110 args.inp = NULL; /* used by ipfw uid/gid/jail rules */ 3111 i = V_ip_fw_chk_ptr(&args); 3112 *mp = args.m; 3113 3114 if (*mp == NULL) 3115 return (error); 3116 3117 if (ip_dn_io_ptr && (i == IP_FW_DUMMYNET)) { 3118 3119 /* put the Ethernet header back on */ 3120 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3121 if (*mp == NULL) 3122 return (error); 3123 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3124 3125 /* 3126 * Pass the pkt to dummynet, which consumes it. The 3127 * packet will return to us via bridge_dummynet(). 3128 */ 3129 args.oif = ifp; 3130 ip_dn_io_ptr(mp, DIR_FWD | PROTO_IFB, &args); 3131 return (error); 3132 } 3133 3134 if (i != IP_FW_PASS) /* drop */ 3135 goto bad; 3136 } 3137 3138 ipfwpass: 3139 error = 0; 3140 3141 /* 3142 * Run the packet through pfil 3143 */ 3144 switch (ether_type) { 3145 case ETHERTYPE_IP: 3146 /* 3147 * before calling the firewall, swap fields the same as 3148 * IP does. here we assume the header is contiguous 3149 */ 3150 ip = mtod(*mp, struct ip *); 3151 3152 ip->ip_len = ntohs(ip->ip_len); 3153 ip->ip_off = ntohs(ip->ip_off); 3154 3155 /* 3156 * Run pfil on the member interface and the bridge, both can 3157 * be skipped by clearing pfil_member or pfil_bridge. 
3158 * 3159 * Keep the order: 3160 * in_if -> bridge_if -> out_if 3161 */ 3162 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3163 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3164 dir, NULL); 3165 3166 if (*mp == NULL || error != 0) /* filter may consume */ 3167 break; 3168 3169 if (pfil_member && ifp != NULL) 3170 error = pfil_run_hooks(&V_inet_pfil_hook, mp, ifp, 3171 dir, NULL); 3172 3173 if (*mp == NULL || error != 0) /* filter may consume */ 3174 break; 3175 3176 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3177 error = pfil_run_hooks(&V_inet_pfil_hook, mp, bifp, 3178 dir, NULL); 3179 3180 if (*mp == NULL || error != 0) /* filter may consume */ 3181 break; 3182 3183 /* check if we need to fragment the packet */ 3184 if (pfil_member && ifp != NULL && dir == PFIL_OUT) { 3185 i = (*mp)->m_pkthdr.len; 3186 if (i > ifp->if_mtu) { 3187 error = bridge_fragment(ifp, *mp, &eh2, snap, 3188 &llc1); 3189 return (error); 3190 } 3191 } 3192 3193 /* Recalculate the ip checksum and restore byte ordering */ 3194 ip = mtod(*mp, struct ip *); 3195 hlen = ip->ip_hl << 2; 3196 if (hlen < sizeof(struct ip)) 3197 goto bad; 3198 if (hlen > (*mp)->m_len) { 3199 if ((*mp = m_pullup(*mp, hlen)) == 0) 3200 goto bad; 3201 ip = mtod(*mp, struct ip *); 3202 if (ip == NULL) 3203 goto bad; 3204 } 3205 ip->ip_len = htons(ip->ip_len); 3206 ip->ip_off = htons(ip->ip_off); 3207 ip->ip_sum = 0; 3208 if (hlen == sizeof(struct ip)) 3209 ip->ip_sum = in_cksum_hdr(ip); 3210 else 3211 ip->ip_sum = in_cksum(*mp, hlen); 3212 3213 break; 3214 #ifdef INET6 3215 case ETHERTYPE_IPV6: 3216 if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) 3217 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3218 dir, NULL); 3219 3220 if (*mp == NULL || error != 0) /* filter may consume */ 3221 break; 3222 3223 if (pfil_member && ifp != NULL) 3224 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, ifp, 3225 dir, NULL); 3226 3227 if (*mp == NULL || error != 0) /* filter may consume */ 3228 break; 3229 3230 if (pfil_bridge && dir == PFIL_IN && bifp != NULL) 3231 error = pfil_run_hooks(&V_inet6_pfil_hook, mp, bifp, 3232 dir, NULL); 3233 break; 3234 #endif 3235 default: 3236 error = 0; 3237 break; 3238 } 3239 3240 if (*mp == NULL) 3241 return (error); 3242 if (error != 0) 3243 goto bad; 3244 3245 error = -1; 3246 3247 /* 3248 * Finally, put everything back the way it was and return 3249 */ 3250 if (snap) { 3251 M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT); 3252 if (*mp == NULL) 3253 return (error); 3254 bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc)); 3255 } 3256 3257 M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT); 3258 if (*mp == NULL) 3259 return (error); 3260 bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); 3261 3262 return (0); 3263 3264 bad: 3265 m_freem(*mp); 3266 *mp = NULL; 3267 return (error); 3268 } 3269 3270 /* 3271 * Perform basic checks on header size since 3272 * pfil assumes ip_input has already processed 3273 * it for it. Cut-and-pasted from ip_input.c. 3274 * Given how simple the IPv6 version is, 3275 * does the IPv4 version really need to be 3276 * this complicated? 3277 * 3278 * XXX Should we update ipstat here, or not? 3279 * XXX Right now we update ipstat but not 3280 * XXX csum_counter. 
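 *
 * Returns 0 if the header checks out (possibly with a replacement mbuf
 * written back through *mp), or -1 on error; in both cases *mp is left
 * pointing at whatever mbuf remains so the caller can free it.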
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (*mp == NULL)
		return (-1);

	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL)
		goto bad;

	if (ip->ip_v != IPVERSION) {
		KMOD_IPSTAT_INC(ips_badvers);
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		KMOD_IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			KMOD_IPSTAT_INC(ips_badhlen);
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL)
			goto bad;
	}

	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		KMOD_IPSTAT_INC(ips_badsum);
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity.
	 */
	if (len < hlen) {
		KMOD_IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		KMOD_IPSTAT_INC(ips_tooshort);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}

#ifdef INET6
/*
 * Same as above, but for IPv6.
 * Cut-and-pasted from ip6_input.c.
 * XXX Should we update ip6stat, or not?
 */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
3388 */ 3389 if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { 3390 struct ifnet *inifp = m->m_pkthdr.rcvif; 3391 if ((m = m_copyup(m, sizeof(struct ip6_hdr), 3392 (max_linkhdr + 3) & ~3)) == NULL) { 3393 /* XXXJRT new stat, please */ 3394 V_ip6stat.ip6s_toosmall++; 3395 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3396 goto bad; 3397 } 3398 } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) { 3399 struct ifnet *inifp = m->m_pkthdr.rcvif; 3400 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { 3401 V_ip6stat.ip6s_toosmall++; 3402 in6_ifstat_inc(inifp, ifs6_in_hdrerr); 3403 goto bad; 3404 } 3405 } 3406 3407 ip6 = mtod(m, struct ip6_hdr *); 3408 3409 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { 3410 V_ip6stat.ip6s_badvers++; 3411 in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr); 3412 goto bad; 3413 } 3414 3415 /* Checks out, proceed */ 3416 *mp = m; 3417 return (0); 3418 3419 bad: 3420 *mp = m; 3421 return (-1); 3422 } 3423 #endif /* INET6 */ 3424 3425 /* 3426 * bridge_fragment: 3427 * 3428 * Return a fragmented mbuf chain. 3429 */ 3430 static int 3431 bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh, 3432 int snap, struct llc *llc) 3433 { 3434 struct mbuf *m0; 3435 struct ip *ip; 3436 int error = -1; 3437 3438 if (m->m_len < sizeof(struct ip) && 3439 (m = m_pullup(m, sizeof(struct ip))) == NULL) 3440 goto out; 3441 ip = mtod(m, struct ip *); 3442 3443 error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist, 3444 CSUM_DELAY_IP); 3445 if (error) 3446 goto out; 3447 3448 /* walk the chain and re-add the Ethernet header */ 3449 for (m0 = m; m0; m0 = m0->m_nextpkt) { 3450 if (error == 0) { 3451 if (snap) { 3452 M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT); 3453 if (m0 == NULL) { 3454 error = ENOBUFS; 3455 continue; 3456 } 3457 bcopy(llc, mtod(m0, caddr_t), 3458 sizeof(struct llc)); 3459 } 3460 M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT); 3461 if (m0 == NULL) { 3462 error = ENOBUFS; 3463 continue; 3464 } 3465 bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN); 3466 } else 3467 m_freem(m); 3468 } 3469 3470 if (error == 0) 3471 KMOD_IPSTAT_INC(ips_fragmented); 3472 3473 return (error); 3474 3475 out: 3476 if (m != NULL) 3477 m_freem(m); 3478 return (error); 3479 } 3480
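
/*
 * The sketch below is editorial illustration only and is not compiled
 * into the kernel (hence the #if 0).  It mirrors bridge_rthash() and
 * bridge_rtnode_addr_cmp() above as a stand-alone userland program, so
 * the bucket selection and the byte-wise comparison used to keep each
 * hash chain sorted can be exercised outside the kernel.  The EX_*
 * names, the example MAC addresses and the hash key constant are all
 * made up for the example; in the driver the key comes from
 * arc4random() at bridge_rtable_init() time.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define EX_RTHASH_SIZE	1024		/* power of two, as in the driver */
#define EX_RTHASH_MASK	(EX_RTHASH_SIZE - 1)
#define EX_ADDR_LEN	6

#define mix(a, b, c)					\
do {							\
	a -= b; a -= c; a ^= (c >> 13);			\
	b -= c; b -= a; b ^= (a << 8);			\
	c -= a; c -= b; c ^= (b >> 13);			\
	a -= b; a -= c; a ^= (c >> 12);			\
	b -= c; b -= a; b ^= (a << 16);			\
	c -= a; c -= b; c ^= (b >> 5);			\
	a -= b; a -= c; a ^= (c >> 3);			\
	b -= c; b -= a; b ^= (a << 10);			\
	c -= a; c -= b; c ^= (b >> 15);			\
} while (0)

/*
 * Same folding as bridge_rthash(), with the key passed explicitly
 * (a cast keeps the high-byte shift well-defined in plain ISO C).
 */
static uint32_t
ex_rthash(uint32_t key, const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key;

	b += addr[5] << 8;
	b += addr[4];
	a += (uint32_t)addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	return (c & EX_RTHASH_MASK);
}

/* Byte-wise compare, as used to keep each hash bucket sorted. */
static int
ex_addr_cmp(const uint8_t *a, const uint8_t *b)
{
	int i, d;

	for (i = 0, d = 0; i < EX_ADDR_LEN && d == 0; i++)
		d = ((int)a[i]) - ((int)b[i]);

	return (d);
}

int
main(void)
{
	const uint8_t mac1[EX_ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t mac2[EX_ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x66 };
	uint32_t key = 0xdeadbeef;	/* stand-in for sc_rthash_key */

	printf("mac1 -> bucket %u\n", (unsigned)ex_rthash(key, mac1));
	printf("mac2 -> bucket %u\n", (unsigned)ex_rthash(key, mac2));
	printf("cmp(mac1, mac2) = %d\n", ex_addr_cmp(mac1, mac2));
	return (0);
}
#endif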